Example #1
def load_keras_resources(model_path='./resources/rnn_attention_pure.model',
                         contextual_model_path='./resources/rnn_attention_context.json',
                         contextual_model_path_w='./resources/rnn_attention_context.h5',
                         tk_path='./resources/keras_tokenizer.pkl'):
    """It will loads resources of keras (contextual) model, keras tokenizer

    Args:
        model_path (str): keras model path without contxts
        contextual_model_path (str): contxtual keras model path
        contextual_model_path_w (str): weights of contxtual keras model path
        tk_path (str): path of tokenizer
    """
    kmodel = None
    kmodel_contxt = None
    ktk = None
    if os.path.isfile(model_path):
        with CustomObjectScope({'Attention': Attention}):
            kmodel = load_model(model_path)
    if os.path.isfile(contextual_model_path) and os.path.isfile(contextual_model_path_w):
        with CustomObjectScope({'Attention': Attention}):
            # load json of network architecture
            kmodel_contxt = model_from_json(
                json.dumps(json.load(open(contextual_model_path))),
                custom_objects={'Attention': Attention})
            # load its weights by using the same name
            kmodel_contxt.load_weights(contextual_model_path_w)
    if os.path.isfile(tk_path):
        ktk = pickle.load(open(tk_path, 'rb'))

    return kmodel, kmodel_contxt, ktk
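A minimal usage sketch for the loader above, assuming the default resource paths; any of the three return values may be None when its file is missing, so callers should check before use.

# Hedged usage sketch: relies on the default paths baked into load_keras_resources;
# each return value is None if the corresponding file was not found.
kmodel, kmodel_contxt, ktk = load_keras_resources()
if kmodel is not None and ktk is not None:
    seqs = ktk.texts_to_sequences(["an example sentence"])
    # pad_sequences(...) and kmodel.predict(...) would follow, depending on the model's input shape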
Example #2
    def __init__(self, data_directory, model_directory):

        self.data_directory = data_directory
        self.model_directory = model_directory

        json_file = open(self.model_directory / 'similarity_model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            self.model_similarity = model_from_json(loaded_model_json,
                                                    custom_objects={
                                                        'ManDist': ManDist,
                                                        'f1_score': f1_score,
                                                        'auc': auc
                                                    })
            self.model_similarity.load_weights(self.model_directory /
                                               'similarity_model.h5')

        json_file = open(self.model_directory / 'sentiment_model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            self.model_sentiment = model_from_json(loaded_model_json,
                                                   custom_objects={
                                                       'ManDist': ManDist,
                                                       'f1_score': f1_score,
                                                       'auc': auc
                                                   })
            self.model_sentiment.load_weights(self.model_directory /
                                              'sentiment_model.h5')

        pickle_directory = Path('pickle')
        with open(pickle_directory / 'encoder_senti.pickle', 'rb') as handle:
            self.encoder_dict = pickle.load(handle)
    def load_model_from_file(self, file_name):
        # load json and create model
        json_file = open('models/' + file_name + '.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            loaded_model = model_from_json(loaded_model_json)

        # load weights into new model
        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            loaded_model.load_weights('models/' + file_name + "_weights.h5")
        print("Loaded model from disk")

        return loaded_model
    def predict(self):

        K.reset_uids()

        classes = ['Normal', 'Neumonia']

        modelo = 'neumonia/model/model_neumonia_v41.json'
        pesos = 'neumonia/model/weights_neumonia_v41.h5'

        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            with open(modelo, 'r') as f:
                model = model_from_json(f.read())
                model.load_weights(pesos)

        img = image.load_img(self.img, target_size=(150, 150))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        resultado = preds[0]  # only one dimension

        porcentaje = np.round(resultado * 100, 2)
        porcentaje = list(porcentaje)

        respuesta = np.argmax(resultado)  # index of the highest value in resultado

        for i in range(len(classes)):
            res = classes[i]

            if i == respuesta:
                return 'Resultado: {:.4}% {}'.format(round(max(porcentaje), 2),
                                                     res)
Example #5
def predict_image(image_dir_base, svm_model):
    with CustomObjectScope({'tf': tf}):
        model = load_model(
            'models/nn4.small2.lrn.h5')  # image-to-embedding-vector model

        image_dir_list = os.listdir(image_dir_base)
        predict_data = []  # list of prediction results
        for class_index, dir_name in enumerate(image_dir_list):
            image_list = os.listdir(image_dir_base + os.sep + dir_name)
            for file_name in image_list:
                img_path = image_dir_base + os.sep + dir_name + os.sep + file_name

                # convert the data for the image-to-embedding-vector model
                img = cv2.imread(img_path)
                img = cv2.resize(img, (96, 96), interpolation=cv2.INTER_CUBIC)
                o_img = img.copy()
                img = img[..., ::-1]
                img = np.around(np.transpose(img, (0, 1, 2)) / 255.0,
                                decimals=12)
                img = np.array([img])

                v = model.predict_on_batch(img)  # image to embedding vector

                predict = svm_model.predict([
                    np.array(v).squeeze()
                ])  # predict which class this embedding vector belongs to
                predict_data.append({
                    'predict_label': predict,
                    'real_label': class_index,
                    'image': o_img
                })

    return predict_data
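A hedged call sketch for predict_image; the image directory and the pickled SVM path below are hypothetical, and svm_model is assumed to be a pre-fitted scikit-learn classifier trained on the same embeddings with integer class labels matching the directory order.

import pickle

# Hypothetical artifacts: a directory of per-class image subfolders and a pickled, pre-fitted SVM.
with open('models/face_svm.pkl', 'rb') as f:
    svm_model = pickle.load(f)

results = predict_image('./test_faces', svm_model)
# assumes the SVM was trained with integer class labels in the same order as the subfolders
correct = sum(int(r['predict_label'][0]) == r['real_label'] for r in results)
print('accuracy: {:.2f}'.format(correct / len(results)))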
    def load_model(self, path):
        print("Loading model from {}".format(path))

        self._create_submodels()

        with CustomObjectScope({'int_shape': int_shape, 'tf': tf}):
            # Load Encoder
            if os.path.exists(path + "/encoder_w.h5"):
                self._encoder.load_weights(path + "/encoder_w.h5", by_name=False)
            elif os.path.exists(path + "/encoder.h5"):
                temp_model = load_model(path + "/encoder.h5")
                self._encoder.set_weights(temp_model.get_weights())
            else: 
                print("WARNING: could not load weights for 'encoder'!")

            # Load Decoder
            if os.path.exists(path + "/decoder_w.h5"):
                self._decoder.load_weights(path + "/decoder_w.h5", by_name=False)
            elif os.path.exists(path + "/decoder.h5"):
                temp_model = load_model(path + "/decoder.h5")
                self._decoder.set_weights(temp_model.get_weights())
            else: 
                print("WARNING: could not load weights for 'decoder'!")

            # Load Pred
            if os.path.exists(path + "/p_pred_w.h5"):
                self._p_pred.load_weights(path + "/p_pred_w.h5", by_name=False)
            elif os.path.exists(path + "/p_pred.h5"):
                temp_model = load_model(path + "/p_pred.h5")
                self._p_pred.set_weights(temp_model.get_weights())
            else: 
                print("WARNING: could not load weights for 'p_pred'!")

        if self.model is None:
            self._build_model()
Example #7
def evaluate(model_path, imgs_path, input_shape, out_path):
    with CustomObjectScope(custom_objects()):
        model = load_model(model_path)
        # model.summary()
    
    imgs = [f for f in os.listdir(imgs_path)]
    for fname in imgs:
        img = imread(os.path.join(imgs_path, fname), mode='RGB')
        img_shape = img.shape
        input_data = img.astype('float32')
        inp_data = input_data
        input_data = imresize(img, input_shape)
        input_data = input_data / 255.
        input_data = (input_data - input_data.mean()) / input_data.std()
        input_data = np.expand_dims(input_data, axis=0)
        
        output = model.predict(input_data)

        mask = cv2.resize(output[0,:,:,0], (img_shape[1], img_shape[0]), interpolation=cv2.INTER_LINEAR)
        mask = np.array(mask, dtype='uint8')
        inp_data = np.array(inp_data, dtype='uint8')
        print(mask.shape," ",inp_data.shape)
        res = cv2.bitwise_and(inp_data,inp_data,mask = mask)
        res[mask==0] = 255
        # img_with_mask = blend_img_with_mask(img, mask, img_shape)
        imsave(out_path + fname, res)
Example #8
def main():
    """
    Generate CoreML models for benchmarking from untrained models.
    This is useful if you just want to measure the inference speed
    of your model.
    """
    hack_coremltools()

    sizes = [224, 192, 160, 128]
    alphas = [1., .75, .50, .25]
    name_fmt = 'mobile_unet_{0:}_{1:03.0f}_{2:03.0f}'

    experiments = [{
        'name':
        name_fmt.format(s, a * 100, a * 100),
        'model':
        MobileUNet(input_shape=(s, s, 3),
                   input_tensor=Input(shape=(s, s, 3)),
                   alpha=a,
                   alpha_up=a)
    } for s, a in product(sizes, alphas)]

    for e in experiments:
        model = e['model']
        name = e['name']

        model.summary()

        with CustomObjectScope(custom_objects()):
            coreml_model = coremltools.converters.keras.convert(
                model, input_names='data')
        coreml_model.save('artifacts/{}.mlmodel'.format(name))
def load_connear_model(modeldir,
                       json_name="/Gmodel.json",
                       weights_name="/Gmodel.h5",
                       crop=1,
                       name=[]):
    # Function to load each CoNNear model using tensorflow and keras
    #print ("loading model from " + modeldir )
    json_file = open(modeldir + json_name, "r")
    loaded_model_json = json_file.read()
    json_file.close()

    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = model_from_json(loaded_model_json, custom_objects={'tf': tf})
    if name:
        try:
            model.name = name
        except:  # fix tensorflow 2 compatibility
            model._name = name
    model.load_weights(modeldir + weights_name)

    if not crop:  # for connecting the different modules
        model = model.layers[1]
        if name:
            model = Model(model.layers[0].input,
                          model.layers[-2].output,
                          name=name)
        else:
            model = Model(model.layers[0].input,
                          model.layers[-2].output)  # get uncropped output
    #else:
    #    model=Model(model.layers[0].input, model.layers[-1].output) # get cropped output

    return model
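A hedged call sketch for load_connear_model; the model directory below is hypothetical and is expected to contain the default Gmodel.json and Gmodel.h5 files.

# Hypothetical directory holding Gmodel.json and Gmodel.h5.
cochlea_model = load_connear_model('./connear/cochlea', crop=1, name='cochlea')
cochlea_model.summary()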
Example #10
 def commentSentiment(self, model, comment):
     print("Read data..")
     lines_processed = ReadData(PROCESSED['lemmatize'],
                                choice=self.choice).readProcessedReview()
     print("tokenize...")
     tokenizer = Tokenizer()
     tokenizer.fit_on_texts(lines_processed)
     tes_res = [0]
     print("Comment = ", comment, type(comment))
     test_sample_1 = comment
     test_samples = [test_sample_1]
     test_samples_tokens = tokenizer.texts_to_sequences(test_samples)
     test_samples_tokens_pad = pad_sequences(test_samples_tokens,
                                             maxlen=200)
     print("Load Model...")
     with CustomObjectScope({'Attention': Attention}):
         new_model = load_model(os.path.join(config.MODELS, model))
     probability = new_model.predict(x=test_samples_tokens_pad)
     print("Probability = ", probability)
     predictions = (probability > 0.5).astype('int32')
     print("Class = ", type(predictions))
     if predictions == 0:
         sent = "Negative"
         print(sent)
     else:
         sent = "Positive"
         print(sent)
     return sent, probability
    def init_model(self, model_path):
        with CustomObjectScope({'initialize_weights': WeightsInitializer, 'initialize_bias': BiasInitializer}):
            input_shape = (SIZE_FFT, SIZE_COLS, 3)
            model = Sequential()
            model.add(Conv2D(64, (10, 10), activation='relu', input_shape=input_shape,
                             kernel_initializer=initialize_weights, kernel_regularizer=l2(2e-4)))
            model.add(BatchNormalization())
            model.add(MaxPooling2D())

            model.add(Conv2D(128, (7, 7), activation='relu',
                             kernel_initializer=initialize_weights,
                             bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
            model.add(BatchNormalization())
            model.add(MaxPooling2D())

            model.add(Conv2D(128, (4, 4), activation='relu', kernel_initializer=initialize_weights,
                             bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
            model.add(BatchNormalization())
            model.add(MaxPooling2D())

            model.add(Conv2D(256, (4, 4), activation='relu', kernel_initializer=initialize_weights,
                             bias_initializer=initialize_bias, kernel_regularizer=l2(2e-4)))
            model.add(Flatten())
            model.add(Dense(1024, activation='softmax',
                            kernel_regularizer=l2(1e-3),
                            kernel_initializer=initialize_weights, bias_initializer=initialize_bias))
            model.load_weights(model_path)
            return model
Example #12
 def load_model(self, path):
     with CustomObjectScope({
             'gated_activation': gated_activation,
             'rmse': rmse
     }):
         self._model = load_model(path)
         print('Model restored')
    def get_model(file_name):
        # Disable GC for calls to pickle to increase de-serialisation speed.
        before = gc.isenabled()
        gc.disable()

        # Check cache first.
        model = ModelFactory.get_from_cache(file_name)
        if model is not None:
            print("INFO: Loading ", file_name, " from cache.", file=sys.stderr)
            return model

        print("INFO: Loading ", file_name, file=sys.stderr)
        root, ext = splitext(file_name)

        if ext == ".h5":
            # Any custom objects _must_ be in scope for loading.
            with CustomObjectScope({
                    "<lambda>": bounded_relu,
                    "AttentionWithContext": AttentionWithContext
            }):
                model = load_model(file_name)
        elif ext == ".pickle":
            model = pickle.load(open(file_name, "rb"))
        else:
            raise NameError(
                "Model must be either stored in .h5 or .pickle format. Aborting."
            )

        # Cache newly loaded models.
        ModelFactory.cache_model(file_name, model)

        if before:
            gc.enable()

        return model
    def __init__(self):
        # Named-entity recognition (NER) model
        ## Loading: saving/loading a Keras+CRF model requires the custom_objects argument
        ## https://keras.io/getting-started/faq/#handling-custom-layers-or-other-custom-objects-in-saved-models

        # Fasttext
        # index number.dictionary
        self.word_index = FastText.load(self.entity_model_path +
                                        "fasttext")  # FastText 문장 데이터
        with open(self.entity_model_path + 'entityIndex.pickle', 'rb') as f:
            self.entity_index = pickle.load(f)

        # Keras Model.h5
        crf = CRF(len(self.entity_index))
        with CustomObjectScope({
                'CRF': crf,
                'crf_loss': crf.loss_function,
                'crf_viterbi_accuracy': crf.accuracy
        }):
            self.entity_model = load_model(self.entity_model_path + "model.h5")
            self.entity_weight = self.entity_model.load_weights(
                self.entity_model_path + "weight.h5")

        print(
            "######################## Success Entity Model load ########################\n\n\n"
        )
Example #15
def load_network(path, platform):
    """Loads a neural network.

	If platform=='linux', the model is loaded from a .h5 file using 
	keras.models.load_model(path). If platform=='windows', the model
	architecture is first loaded from a .json, after which the 
	weights are loaded separately.

	Arguments
	---------
	path : str (path object)
		Path to folder containing the files NETWORK.h5, STRUCTURE.h5 and
		WEIGHTS.h5.
	platform : str
		OS, either linux or windows.
	"""

    if platform == 'linux':
        network = models.load_model(path + '/NETWORK.h5')

    elif platform == 'windows':
        from keras.utils import CustomObjectScope
        from keras.initializers import glorot_uniform
        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            with open(path + '/STRUCTURE.json', 'r') as f:
                json_string = f.read()
                network = keras_models.model_from_json(json_string)
                network.load_weights(path + '/WEIGHTS.h5')

    return network
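A brief, hedged example of calling load_network; the folder path is hypothetical and must contain NETWORK.h5 (Linux) or STRUCTURE.json plus WEIGHTS.h5 (Windows), as the docstring describes.

# Hypothetical folder holding NETWORK.h5 / STRUCTURE.json / WEIGHTS.h5.
network = load_network('./saved_network', platform='windows')
network.summary()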
Example #16
    def make_prediction(self):
        K.reset_uids()
        model = ""
        weights = ""
        classes = {
            'TRAIN': ['GRADE 0', 'GRADE 1', 'GRADE 2', 'GRADE 3', 'GRADE 4'],
            'VALIDATION': ['GRADE 0', 'GRADE 1', 'GRADE 2', 'GRADE 3', 'GRADE 4'],
            'TEST': ['GRADE 0', 'GRADE 1', 'GRADE 2', 'GRADE 3', 'GRADE 4'],
        }    
        
        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            with open(model, 'r') as f:
                model = model_from_json(f.read())
                model.load_weights(weights)

        xray_image = image.load_img(self.img, target_size=(224, 224))
        x = image.img_to_array(xray_image)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        model.compile(loss="categorical_crossentropy", metrics=[
                      "accuracy"], optimizer="adam")
        result = model.predict(x)

        pred_name = classes['TRAIN'][np.argmax(result)]

        messages.success(result, f"The osteoarthritis is predicted to be {pred_name}")

        return pred_name
Example #17
def load_lenet(file, trans_configs=None, use_logits=False, wrap=False):
    """
    Load a LeNet model (implemented in keras).
    :param file: str or path. The full-path file name to the trained model.
    :param trans_configs: dictionary. The corresponding transformation settings.
    :param use_logits: boolean. Use the logits or the probabilities.
    :param wrap: boolean. True, if want to wrap the model into a weak defense in Athena.
    :return:
    """
    print('>>> Loading model [{}]...'.format(file))
    with CustomObjectScope({"GlorotUniform": glorot_uniform()}):
        model = load_model(file)

    if wrap:
        if trans_configs is None:
            # load the undefended model by default
            trans_configs = {
                "type": "clean",
                "subtype": "",
                "id": 0,
                "description": "clean"
            }
        model = WeakDefense(model=model,
                            trans_configs=trans_configs,
                            use_logits=use_logits)
    else:
        if use_logits:
            model = _convert2logits(model)

    return model
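A short usage sketch for load_lenet, assuming a trained LeNet checkpoint exists at the hypothetical path below; passing wrap=True would additionally need Athena's WeakDefense and a transformation config.

# Hypothetical path to a trained LeNet .h5 checkpoint.
lenet = load_lenet(file='models/lenet_mnist.h5', use_logits=False, wrap=False)
lenet.summary()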
def predict(x):
    keras.backend.clear_session()
    with open("model_data/regression_model.json", "r") as data:
        model_json = json.load(data)

    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = keras.models.model_from_json(model_json)

    x = np.array([x])

    model.load_weights("model_data/regression_weights")

    #has to be in this format
    #x = np.array([[-0.39805957, -0.48028321,  0.41150996, -0.26091209, -1.01875102,
    #         -0.68907594, -0.37316522,  1.22683341, -0.63105852, -0.70158051,
    #         -1.17261374,  0.45007505,  0.44124745]])

    return (model.predict(x)[0, 0])
Example #19
def predictionImage():
    # suppress the warning about compiling the model after loading
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        test_model = load_model('../model/best_model.h5')

    # open image on input path
    with open('binarizedImage.jpg', 'rb') as f:
        with Image.open(f).convert('L') as image:

            # change size of binarized image to 28x28 pixels
            resized_image = image.resize((28, 28), Image.ANTIALIAS)
            plt.imshow(resized_image)
            plt.show()

            # convert image to array
            x = img_to_array(resized_image)

            # add one dimension on image, [28, 28] -> [1, 28, 28]
            x = np.expand_dims(x, axis=0)

            # get predictions for all outputs(numbers)
            predictions = test_model.predict_classes(x)
            probabilities = test_model.predict_proba(x)

            # write data on output
            print("Number is: " + str(predictions))

            # remove image from disc
            os.remove('binarizedImage.jpg')
Example #20
    def __init__(self, folds):

        self.folds = folds
        self.Models = []
        with open(os.path.join(os.getcwd(), "config.json")) as f:
            config = json.load(f)
        if "embedding" in config.keys():
            self.embedding = config["embedding"]
        preprocess_obj = CustomUnpickler(
            open(
                os.path.join(os.getcwd(),
                             'bureau/models/WEpreprocess_obj.pkl'),
                'rb')).load()
        if config["model"] == "SimpleRNN":
            for i in range(self.folds):
                model = keras.models.load_model(
                    os.path.join(
                        os.getcwd(),
                        "bureau/models/IntentWithEntity" + str(i) + ".h5"))
                self.Models.append(model)
        elif config["model"] == "Attention":
            for i in range(self.folds):
                with CustomObjectScope({'AttentionLayer2': attention2}):
                    model = keras.models.load_model(
                        os.path.join(
                            os.getcwd(), "bureau/models/IntentWithEntityAttn" +
                            str(i) + ".h5"))
                    self.Models.append(model)
        self.tokenizer = preprocess_obj.tokenizer
        self.max_len = preprocess_obj.max_len
        with open(os.path.join(os.getcwd(), 'bureau/models/WEid2intent.pkl'),
                  "rb") as f3:
            self.id2intent = pickle.load(f3)
def main():
    with CustomObjectScope(custom_objects()):
        model = load_model(SAVED_MODEL)

    img_in = misc.imread("data/border.jpg")

    i_width = 224
    i_height = 224

    img_resize = skimage.transform.resize(img_in, (i_width, i_height),
                                          preserve_range=True)
    img = np.copy(img_resize).astype('uint8')

    img_reshape = img.reshape(1, i_height, i_width, 3).astype(float)

    t1 = time.time()
    pred = model.predict(standardize(img_reshape)).reshape(i_height, i_width)
    elapsed = time.time() - t1
    print('elapsed1: ', elapsed)

    plt.subplot(2, 2, 1)
    plt.imshow(img)

    plt.subplot(2, 2, 2)
    plt.imshow(pred)

    plt.show()
def experiment(dl_params, model_params, explainer_type, save_dir=""):

    keras.backend.clear_session()

    #   create data
    print("Loading data...")
    dataloader = Dataloader(dl_params, rseed=0)
    #X_train, y_train = dataloader.get_dataset("train")
    #X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    #   convert to np.array
    #X_train = np.stack(X_train, axis=0)
    #X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    #y_train = np.asarray(y_train)
    #y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    #X_train = X_train.astype("float") / 255.0
    #X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #image = expand_dims(X_test[0], axis=0)
    image = X_test[70]
    print(image.shape)

    print(matplotlib.get_backend())

    print("Building classifier...")
    #   add this line to prevent some Keras serializer error
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = load_model(model_params['load_location'])

    print("Predicting image...")
    label = model.predict(np.array([
        image,
    ]))

    print("The inputted image is predicted to be ", label)

    print("Building explainer...")
    if model_params['output_dim'] > 2:
        model_wo_sm = iutils.keras.graph.model_wo_softmax(
            model)  #   remove softmax
    else:
        model_wo_sm = model

    explainer = innvestigate.create_analyzer(explainer_type, model_wo_sm)
    print("Explainer type: ", type(explainer))
    explain_innvestigate(image,
                         label,
                         explainer,
                         save_name=explainer_type,
                         save_dir=save_dir)

    keras.backend.clear_session()
Example #23
def custom_object_scope():
    return CustomObjectScope({
        'categorical_crossentropy_from_logits': categorical_crossentropy_from_logits,
        'PGD': PGD,
        'Crop': Crop,
        'Preprocessing': Preprocessing,
        'OneHot': OneHot
    })
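The returned scope is typically used to wrap deserialization so the custom layers and loss resolve by name; a minimal sketch, assuming a hypothetical checkpoint path:

import keras

# Hypothetical checkpoint; the names registered above must match those stored in the model config.
with custom_object_scope():
    model = keras.models.load_model('checkpoints/robust_classifier.h5')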
Example #24
def predict(model_path):
    """模型预测流程
    """
    # 环境设置
    with CustomObjectScope(custom_objects):
        model = load_model(model_path)

    return model
Example #25
def predict(m):
    mi = np.array([m])
    # Retrieve the trained model
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        clf = load_model('image_model.h5')
    for i in range(10):
        if clf.predict(mi)[0][i] == 1:
            return i
Example #26
def load_model(path):
    shape = probe_model_shape(path)
    with CustomObjectScope({
            '_loss_deepclustering': loss_deepclustering(*shape),
            '_loss_mask': loss_mask(*shape),
    }):
        model = keras.models.load_model(path)
    return model
Example #27
    def __init__(self):

        print('INFO: Loading viewclass model')

        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            self.viewmodel = load_model(os.path.join('..', 'models', self.MODEL_NAME),
                                        custom_objects={"Tile": Tile})

        print('INFO: Viewclass model loaded')
Example #28
    def __init__(self,
                 state_size,
                 strategy="t-dqn",
                 reset_every=1000,
                 pretrained=False,
                 model_name=None,
                 manual=False):
        self.strategy = strategy

        # agent config
        self.state_size = state_size  # normalized previous days
        self.action_size = 3  # [sit, buy, sell]
        self.model_name = model_name
        self.inventory = []
        self.buffer = []
        self.first_iter = True
        self.nstep = 5
        self.n_step_buffer = deque(maxlen=self.nstep)
        self.cnt = count()
        self.alpha = 0.6
        # self.memory = deque(maxlen = 10000)

        # model config
        self.model_name = model_name
        self.gamma = 0.95  # affinity for long term reward
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001

        # For Categorical DQN
        #Initializing Atoms
        # self.num_atoms = 51
        # self.v_max = 30 # Max possible score for Defend the center is 26 - 0.1*26 = 23.4
        # self.v_min = -10 # -0.1*26 - 1 = -3.6
        # self.delta_z = (self.v_max - self.v_min) / float(self.num_atoms - 1)
        # self.z = [self.v_min + i * self.delta_z for i in range(self.num_atoms)]

        self.loss = huber_loss
        self.custom_objects = {
            "huber_loss": huber_loss
        }  # important for loading the model from memory
        self.optimizer = Adam(lr=self.learning_rate)

        if pretrained and self.model_name is not None:
            self.model = self.load(manual)
        else:
            self.model = self._model()

        # strategy config
        if self.strategy in ["t-dqn", "double-dqn"]:
            self.n_iter = 1
            self.reset_every = reset_every

            with CustomObjectScope({"NoisyDense": NoisyDense}):
                # target network
                self.target_model = clone_model(self.model)
            self.target_model.set_weights(self.model.get_weights())
def main():

    class_names = [
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
        'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
        'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
    ]

    img_shape = 20

    # load the file that contains the structure of the trained model
    json_file = open('model/neural_network.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = model_from_json(loaded_model_json)
    # load the weights of the trained model
    model.load_weights("model/neural_network.h5")

    # open file that will contain the license plate numbers (strings)
    f = open('licencePlates.txt', 'w')

    # path to the images of licence-plate characters: each image contains the plate's
    # characters (20x20 each) concatenated side by side, so its width is #chars x 20
    fn = "licence_plates/*.jpg"

    # extract image names from the path
    filenames = glob.glob(fn)
    filenames.sort()
    images = []

    # load images and save them in a vector of images
    for img in filenames:
        image = cv2.imread(img)
        images.append(image)

    for img in images:
        S = ''
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) / 255
        # extract each char (20x20) from the image
        for j in range(int(img.size / (img_shape * img_shape))):
            char = img[:, img_shape * j:img_shape * (j + 1)]
            cv2.transpose(char, char)
            char = char.reshape((-1, img_shape, img_shape, 1), order="F")
            # predict the label of the char
            predictor = model.predict(char)
            max_prob = np.argmax(predictor)
            # concatenate chars in order to obtain a string with the number of the licence plate
            S = S + class_names[max_prob]

        S = S + "\n"
        # the plates are in the same order as the images in the dataset
        print("Plate detected: " + S)
        # save the string; it will later be loaded by a C++ program
        f.write(S)

    f.close()
    keras.backend.clear_session()
Example #30
def main():

    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(dest="data_path",
                        metavar="DATA_PATH",
                        help="Path to read data from.")
    parser.add_argument(dest="model_path",
                        metavar="MODEL_PATH",
                        help="Path to read model from.")
    parser.add_argument(
        "-b",
        "--read_batches",
        metavar="READ_BATCHES",
        default=False,
        help="If true, data is read incrementally in batches during training.")
    args = parser.parse_args()
    parse_args(args)

    # Load model
    with CustomObjectScope({
            '_euclidean_distance': cnn_siamese_online._euclidean_distance,
            'ALPHA': cnn_siamese_online.ALPHA,
            "relu_clipped": cnn_siamese_online.relu_clipped
    }):
        tower_model = load_model(args.model_path)
        tower_model.compile(
            optimizer='adam',
            loss='mean_squared_error')  # the model was not previously compiled

    if not args.read_batches:  # Read all data at once

        # Load training triplets and validation triplets
        X_train, y_train = utils.load_examples(args.data_path, "train")
        X_valid, y_valid = utils.load_examples(args.data_path, "valid")

        # Get abs(distance) of embeddings
        X_train_emb = tower_model.predict(X_train)
        X_valid_emb = tower_model.predict(X_valid)

    else:  # Read data in batches
        raise ValueError("Reading in batches is not implemented yet.")

    # Shuffle the data
    X_train_emb, y_train = shuffle(X_train_emb, y_train)
    X_valid_emb, y_valid = shuffle(X_valid_emb, y_valid)

    # Run k-means on training data
    print("Running K-means...")
    k_means_model = KMeans(n_clusters=K, verbose=0)
    k_means_model.fit(X_train_emb)

    # Plot result
    k_means_PCA(k_means_model, X_train_emb, y_train, display_k_means=True)
    k_means_PCA(k_means_model, X_train_emb, y_train, display_k_means=False)

    # Compute percentage of each class in each cluster
    compute_cluster_class_fractions(k_means_model, y_train)