Example #1
def load_model(model_path):
    # The model's config.yml lives next to the weights file and names the model.
    model_config_path = os.path.join(os.path.dirname(model_path), "config.yml")
    model_name = load(model_config_path, "model_name")

    # Models that define custom layers need them passed through to Keras.
    if hasattr(MODELS[model_name], "custom_objects"):
        return keras_load_model(
            model_path,
            custom_objects=MODELS[model_name].custom_objects,
            compile=False)
    return keras_load_model(model_path, compile=False)
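Here `load` and `MODELS` are project helpers. A minimal sketch of what the assumed `load` might look like, reading one key from the YAML config with PyYAML (the helper itself is an assumption, not part of the source project):

import yaml

def load(config_path, key):
    # Hypothetical helper: return a single key from a YAML config file.
    with open(config_path) as f:
        return yaml.safe_load(f)[key]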
Example #2
    def load_model(self, load_model_name: str, detection: str, num_set: str):
        # Weights are stored as Models/<detection>/<name>_<num_set>_last.h5
        model_path = os.path.join("Models", detection,
                                  load_model_name + "_" + num_set + "_last.h5")
        model = keras_load_model(model_path)

        return model
Example #3
    def _load_model(self, load_dir, verbose=True):
        """
        Loads models from a directory.
        :param load_dir: path to the directory to load from
        :param verbose: whether to print a summary after loading
        :return: None
        """
        # Load the three GAN sub-models and the associated training history.
        # Usually called manually after a pickle load of all other attributes.
        self.discriminator = keras_load_model(f"{load_dir}/discriminator")
        self.discriminator.trainable = False
        self.generator = keras_load_model(f"{load_dir}/generator")
        self.combined = keras_load_model(f"{load_dir}/combined")
        with open(f"{load_dir}/history.p", "rb") as file:
            self.history = pickle.load(file)
        if verbose:
            self.print_summary()
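The matching save step is not shown; a minimal sketch of the counterpart this loader assumes, writing the same directory layout (the method name `_save_model` is hypothetical):

    def _save_model(self, save_dir):
        # Hypothetical counterpart to _load_model above.
        self.discriminator.save(f"{save_dir}/discriminator")
        self.generator.save(f"{save_dir}/generator")
        self.combined.save(f"{save_dir}/combined")
        with open(f"{save_dir}/history.p", "wb") as file:
            pickle.dump(self.history, file)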
Example #4
    def __init__(self, teacher_model):
        self.train_model, self.born_again_model = None, None
        self.temperature = args.temperature
        # Load the teacher and freeze every layer: only the student
        # (born-again) model is trained.
        self.teacher_model = keras_load_model(teacher_model)
        for layer in self.teacher_model.layers:
            layer.trainable = False
        self.teacher_model.compile(optimizer="adam",
                                   loss="categorical_crossentropy")
        self.train_model, self.born_again_model = self.prepare()
        self.train_model = convert_gpu_model(self.train_model)
Example #5
    def load_model(self, load_model_name: str, detection: str, num_set: str):

        model_path = os.path.join("Models", detection,
                                  load_model_name + "_" + num_set + "_last.h5")

        # DiluteLayer and CustomF1 are custom objects; Keras cannot
        # deserialize them unless they are passed in explicitly.
        model = keras_load_model(model_path,
                                 custom_objects={
                                     "DiluteLayer": DiluteLayer,
                                     "CustomF1": CustomF1
                                 },
                                 compile=False)

        return model
Example #6
def load_model(path=None):
    # With no path, fall back to a freshly built single-frame model.
    if path is None:
        return model.single_frame_model()
    if not os.path.exists(path):
        raise FileNotFoundError(f"Model path {path} doesn't exist")
    return keras_load_model(path,
                            custom_objects={
                                'temporal_crossentropy':
                                train.temporal_crossentropy,
                                'temporal_accuracy':
                                train.temporal_accuracy
                            })
Example #7
def load_model(filepath, custom_objects=None, compile=True) -> Model:
    """
    Equivalent to load_model from keras, but custom_objects are already known

    Args:
        filepath: One of the following:
            - String, path to the saved model
            - `h5py.File` object from which to load the model.
        custom_objects: Optional dictionary mapping names (strings) to custom classes
            or functions to be considered during deserialization.
        compile: Boolean, whether to compile the model after loading.

    Returns:
        A Keras model instance. If an optimizer was found as part of the
        saved model, the model is already compiled.

    """
    deel_custom_objects = CUSTOM_OBJECTS.copy()
    if custom_objects is not None:
        deel_custom_objects.update(custom_objects)
    return keras_load_model(filepath, custom_objects=deel_custom_objects,
                            compile=compile)
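A usage sketch for this wrapper (the file name and the extra metric are hypothetical). Because the dictionaries are merged with `update`, user-supplied objects take precedence over the library's `CUSTOM_OBJECTS` on name collisions:

model = load_model("lipschitz_model.h5", compile=False)
# Extra project-specific objects can still be supplied:
model = load_model("lipschitz_model.h5",
                   custom_objects={"my_metric": my_metric},  # hypothetical
                   compile=False)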
Example #8
def load_model(model_file, network_config):
    # Note: this snippet targets the Keras 1.x API (`input_layers`,
    # `input_dtype`, `Model(input=..., output=...)`).
    model = keras_load_model(model_file)

    # If necessary, wrap the loaded model to transpose the axes for both
    # inputs and outputs.
    if network_config.transpose:
        inputs = []
        perms = []
        for old_input in model.input_layers:
            # Drop the batch axis and swap spatial axes 1 and 3.
            input_shape = np.asarray(old_input.input_shape)[[3, 2, 1, 4]]
            new_input = Input(shape=tuple(input_shape),
                              dtype=old_input.input_dtype,
                              name=old_input.name)
            perm = Permute((3, 2, 1, 4),
                           input_shape=tuple(input_shape))(new_input)
            inputs.append(new_input)
            perms.append(perm)

        old_outputs = model(perms)
        if not isinstance(old_outputs, list):
            old_outputs = [old_outputs]

        outputs = []
        for old_output in old_outputs:
            new_output = Permute((3, 2, 1, 4))(old_output)
            outputs.append(new_output)

        new_model = Model(input=inputs, output=outputs)

        # Monkeypatch the save to save just the underlying model.
        func_type = type(model.save)

        old_model = model

        def new_save(_, *args, **kwargs):
            old_model.save(*args, **kwargs)

        new_model.save = func_type(new_save, new_model)

        model = new_model

    return model
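The monkeypatched `save` is a deliberate design choice: checkpoints written through the transposing wrapper contain only the original, untransposed model, so a file saved this way can be passed back through this same `load_model` without double-wrapping.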
Example #9
    def load_keras_model(self, identifier):
        if not identifier.endswith('h5'):
            raise ValueError(
                "To load a Keras model, the file extension must be `h5`")

        try:
            model_path = os.path.join(self.output_folder, identifier)

            self._file_exists(model_path)
            if not is_tf_support:
                raise RuntimeError(
                    "Tried to load the model with the `TensorFlow` backend, "
                    "but the `tensorflow` module could not be imported.")

            model = keras_load_model(model_path)

            return model
        except IOError as e:
            logger.error("Failed to load Keras model: {}".format(e))
            raise IOError("Failed to load Keras model: {}".format(e))
Example #10
def load_model(path='../models/inception_v3'):
    """Retrieves the trained model"""
    model = keras_load_model(path)
    return model
Example #11
def load_model(filename, **kwargs):
    """
    Load model from file

    Parameters
    ----------
    filename : str
        path to the h5 file

    Returns
    -------
    deoxys.model.Model
        The loaded model
    """
    # Keras can fail to load models containing custom objects; try a direct
    # load first and fall back to rebuilding from the stored config.
    try:
        loaded_model = keras_load_model(filename,
                                        custom_objects={
                                            **Layers().layers,
                                            **Activations().activations,
                                            **Losses().losses,
                                            **Metrics().metrics
                                        })

        # keyword arguments to create the model
        model_kwargs = {}
        with h5py.File(filename, 'r') as hf:
            # get the data_reader
            if 'deoxys_config' in hf.attrs.keys():
                config = hf.attrs['deoxys_config']
                config = load_json_config(config)

                if 'dataset_params' in config:
                    model_kwargs['data_reader'] = load_data(
                        config['dataset_params'])

                model_kwargs['config'] = config

            # take the sample data
            if 'deoxys' in hf.keys():
                if 'batch_x' in hf['deoxys'] and 'batch_y' in hf['deoxys']:
                    model_kwargs['sample_data'] = (hf['deoxys']['batch_x'][:],
                                                   hf['deoxys']['batch_y'][:])
                elif 'batch_y' in hf['deoxys']:
                    batch_y = hf['deoxys']['batch_y'][:]
                    batch_x = []
                    for key in hf['deoxys'].keys():
                        if 'batch_x' in key:
                            i = int(key[-1])
                            assert i == len(batch_x)
                            # Read into memory: the file is closed when the
                            # `with` block exits, so a bare dataset handle
                            # would go stale before it is used.
                            batch_x.append(hf['deoxys'][f'batch_x_{i}'][:])
                    model_kwargs['sample_data'] = (batch_x, batch_y)

            # User-supplied kwargs override anything recovered from the file
            model_kwargs.update(kwargs)

        model = Model(loaded_model, pre_compiled=True, **model_kwargs)

    except Exception:
        # Fall back: rebuild the model from its stored config and weights.
        sample_data = None
        with h5py.File(filename, 'r') as hf:
            if 'deoxys_config' in hf.attrs.keys():
                config = hf.attrs['deoxys_config']

            if 'deoxys' in hf.keys():
                if 'batch_x' in hf['deoxys'] and 'batch_y' in hf['deoxys']:
                    sample_data = (hf['deoxys']['batch_x'][:],
                                   hf['deoxys']['batch_y'][:])

        model = model_from_full_config(config,
                                       weights_file=filename,
                                       sample_data=sample_data)

    return model
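Since user kwargs override anything recovered from the HDF5 file, callers can swap in their own data reader. A hedged usage sketch (the file name and reader are hypothetical):

model = load_model("experiment.h5", data_reader=my_reader)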
Example #12
def launch_model():
    full_text = request.form['full_text']
    id_ = request.form['id']
    model_type = request.form['model_type']

    global BERT, JOINT, GRANU, MGN, NUM_TASK, MASKING, HIER
    BERT = model_type == BERT_PATH
    JOINT = model_type == JOINT_BERT_PATH
    GRANU = model_type == GRANU_BERT_PATH
    MGN = model_type == MGN_SIGM_BERT_PATH

    # exactly one of the four flags must be set, e.g.:
    # BERT = False
    # JOINT = False
    # GRANU = False
    # MGN = True

    assert BERT or JOINT or GRANU or MGN
    assert not (BERT and JOINT) and not (BERT and GRANU) and not (BERT and MGN) \
           and not (JOINT and GRANU) and not (JOINT and MGN) and not (GRANU and MGN)

    # exactly one of the two activation variants
    SIGMOID_ACTIVATION = True
    RELU_ACTIVATION = False
    assert not (SIGMOID_ACTIVATION and RELU_ACTIVATION) and (
        SIGMOID_ACTIVATION or RELU_ACTIVATION)

    if BERT:
        NUM_TASK = 1
        MASKING = 0
        HIER = 0
    elif JOINT:
        NUM_TASK = 2
        MASKING = 0
        HIER = 0
    elif GRANU:
        NUM_TASK = 2
        MASKING = 0
        HIER = 1
    elif MGN:
        NUM_TASK = 2
        MASKING = 1
        HIER = 0
    else:
        raise ValueError(
            "Choose exactly one of: bert, joint, granu, mgn")

    dct = {
        'NUM_TASK': NUM_TASK,
        'MASKING': MASKING,
        'SIGMOID_ACTIVATION': SIGMOID_ACTIVATION,
        'HIER': HIER
    }
    model = load_model(model_type, **dct)

    if not id_:
        # No id supplied: pick a fresh one that does not collide.
        ids = get_existent_ids()
        id_ = random_module.randint(0, N)
        while id_ in ids:
            id_ = random_module.randint(0, N)
        with open(DIRECTORY_PREDICT.joinpath(f'article{id_}.txt'),
                  'w',
                  encoding='utf-8') as f:
            f.write(full_text)

    text = overwrite_one_article(id_, directory=DIRECTORY_PREDICT)

    my_predict_dataset = PropDataset(DIRECTORY_PREDICT, is_test=True)
    my_predict_iter = data.DataLoader(dataset=my_predict_dataset,
                                      batch_size=BATCH_SIZE,
                                      shuffle=False,
                                      num_workers=1,
                                      collate_fn=pad)

    tmp_file = 'tmp.txt'
    eval(model,
         my_predict_iter,
         tmp_file,
         criterion,
         binary_criterion,
         NUM_TASK=NUM_TASK)
    ids, texts = read_data(DIRECTORY_PREDICT, is_test=True)
    t_texts = clean_text(texts, ids)
    flat_texts = [sentence for article in t_texts for sentence in article]
    fi, prop_sents = convert(NUM_TASK - 1, flat_texts, tmp_file)
    prop_sents = prop_sents[id_]
    prop_sents = ['1' if elem else '' for elem in prop_sents]

    results = remove_duplicates(fi)

    DIRECTORY_PREDICT.joinpath(f'article{id_}.txt').rename(
        DIRECTORY_MARKUP.joinpath(f'article{id_}.txt'))

    lst = [set() for _ in range(len(full_text))]
    source_lst = [set() for _ in range(len(full_text))]
    for inner_lst in results:
        for i in range(inner_lst[-2], inner_lst[-1]):
            lst[i].add(HUMAN_READABLE_TECHNIQUES[TECHNIQUES.index(
                inner_lst[-3])])
            source_lst[i].add(inner_lst[-3])

    extracts_s_e = []
    extracts = []
    categories = []
    for elem in fi:
        if elem[0] != str(id_):
            continue
        _, category, start, end = elem
        extracts_s_e.append((start, end))
        extracts.append(text[start:end])
        categories.append(category)

    extracts = [
        ' '.join(normalize(extract.strip())) for extract in extracts if extract
    ]
    print(f'extracts: {extracts}')

    # CHECK
    # extracts = [word for sent in extracts for word in sent.split()]

    test_x, test_maxlen = get_data(extracts,
                                   vocab_size=args.vocab_size,
                                   maxlen=args.maxlen)
    test_x = sequence.pad_sequences(test_x,
                                    maxlen=max(train_maxlen, test_maxlen))

    # Split the padded sequences into batch-sized chunks.
    test_length = test_x.shape[0]
    splits = []
    for i in range(1, test_length // args.batch_size):
        splits.append(args.batch_size * i)
    if test_length % args.batch_size:
        splits += [(test_length // args.batch_size) * args.batch_size]
    test_x = np.split(test_x, splits)

    with graph.as_default():
        aspect_model = keras_load_model(os.path.join('flask_app', 'output',
                                                     'reviews', 'model_param'),
                                        custom_objects={
                                            "Attention": Attention,
                                            "Average": Average,
                                            "WeightedSum": WeightedSum,
                                            "MaxMargin": MaxMargin,
                                            "WeightedAspectEmb":
                                            WeightedAspectEmb,
                                            "max_margin_loss":
                                            U.max_margin_loss
                                        },
                                        compile=True)

        test_fn = K.function([
            aspect_model.get_layer('sentence_input').input,
            K.learning_phase()
        ], [
            aspect_model.get_layer('att_weights').output,
            aspect_model.get_layer('p_t').output
        ])
        aspect_probs = []

        for batch in tqdm(test_x):
            _, cur_aspect_probs = test_fn([batch, 0])
            aspect_probs.append(cur_aspect_probs)

        aspect_probs = np.concatenate(aspect_probs)

        label_ids = np.argsort(aspect_probs, axis=1)[:, -5:]
        for i, labels in enumerate(label_ids):
            print(
                f'{extracts[i]}: {[aspects[label] for label in labels][::-1]}')

    correct_lst = ['; '.join(list(elem)) for elem in lst]
    commands = {
        extract: ([aspects[label] for label in label_ids[i]][::-1], [])
        for i, extract in enumerate(extracts)
    }
    write_existent_dict(id_, source_lst, directory=DIRECTORY_MARKUP)

    for f in glob.glob(f'{DIRECTORY_PREDICT}/*'):
        os.remove(f)

    return jsonify(
        result={
            'id': id_,
            'list': correct_lst,
            'text': text,
            'prop_sents': prop_sents,
            'commands': commands
        })
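A hedged client-side sketch of the form this endpoint expects (the host, port, and route are assumptions; `model_type` is the string value of one of the four configured model paths):

import requests

resp = requests.post(
    "http://localhost:5000/launch_model",  # hypothetical route
    data={
        "full_text": "Article text to analyse ...",
        "id": "",  # empty: the server assigns a fresh random id
        "model_type": MGN_SIGM_BERT_PATH,  # one of the four model paths
    })
print(resp.json()["result"]["list"])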
Example #13
# vehicle_dataset = 'data/vehicle-detector/voc.data'.encode("utf-8")    

# License-plate detector (WPOD-NET).
lp_threshold = .6
wpod_net_path = "data/lp-detector/wpod-net_update1.h5"
wpod_net = load_model(wpod_net_path)

# Darknet OCR network for reading plate characters.
ocr_threshold = .6
ocr_weights = 'data/ocr/ocr-net.weights'.encode("utf-8")
ocr_netcfg  = 'data/ocr/ocr-net.cfg'.encode("utf-8")
ocr_dataset = 'data/ocr/ocr-net.data'.encode("utf-8")
ocr_net  = dn.load_net(ocr_netcfg, ocr_weights, 0)
ocr_meta = dn.load_meta(ocr_dataset)

# Keras classifier for the car-category detector.
car_cat_threshold = .75
car_cat_net_path = "data/car-cat-detector/car-cat-detector.h5"
car_cat_net = keras_load_model(car_cat_net_path)

car_cat_img_size = 299
car_cat_vehicle_width_threshold = 120 # pixels
car_cat_vehicle_height_threshold = 120 # pixels

# Human-readable category names: first column of each row in the `.names` file.
car_cat_names = []
with open('data/car-cat-detector/car-cat-detector.names') as csvDataFile:
    csvReader = csv.reader(csvDataFile, delimiter=';')
    for row in csvReader:
        car_cat_names.append(row[0])

print(car_cat_names)

CAR_TRACK_MIN_DISTANCE = 40
CAR_TRACK_N_FRAME = 8