def load_best_model(results_dir):
    """
    Select the best model of the list of model checkpoints saved in the results dir of a fold.

        INPUT:
            results_dir - the directory in which the saved model checkpoints can be found

        OUTPUT:
            model - the best performing trained Keras model which was saved as checkpoint
    """

    # list all files in the results dir; lexicographic sort puts the best
    # checkpoint last (assumes the metric is encoded in the filenames)
    checkpoints = os.listdir(results_dir)
    checkpoints.sort()

    # select the best model; skip compiling when the custom ordinal loss
    # cannot be deserialized directly
    import C_MainScripts.ordinal_categorical_crossentropy as OCC
    if config.loss_function != OCC.customLossFunction:
        model = load_model(results_dir + "/" + checkpoints[-1])
    else:
        model = load_model(results_dir + "/" + checkpoints[-1], compile=False)

    print("Loading best model for evaluation: ", checkpoints[-1])

    return model
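A minimal usage sketch, assuming one results directory per cross-validation fold (the "results/fold_{i}" layout, config.n_folds and x_val are hypothetical):

for i in range(config.n_folds):
    best = load_best_model("results/fold_{}".format(i))
    predictions = best.predict(x_val)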
Example #2
    def __init__(self):
        if rotdot:
            print('entered rotdot in mpc constructor')
            input_shape_mlp = 2
            self.scaler = MinMaxScaler(feature_range=(-1, 1))
            self.MLP_model = load_model(
                'model/mlp_model_rotdot_10k_1000ep_2hidden.h5')
            # necessary; otherwise predicting fails due to a Keras multithreading bug
            self.MLP_model._make_predict_function()
            # determine scaling factor
            dataset = np.loadtxt(
                "generated_data/random_rot_dot/training_less_random_10k.data",
                delimiter=";",
                comments='#')
            print('loaded all files')
            x_train = dataset[:, 0:input_shape_mlp]
            self.scaler.fit(x_train)  # only the fit is needed; the transformed data was discarded
        else:
            input_shape_mlp = 3
            self.scaler = MinMaxScaler(feature_range=(-1, 1))
            self.MLP_model = load_model(
                'model/mlp_model_eenvoudiger_200k_100ep.h5')
            # necessary; otherwise predicting fails due to a Keras multithreading bug
            self.MLP_model._make_predict_function()
            # determine scaling factor
            dataset = np.loadtxt(
                "generated_data/random/training_less_random_200k.data",
                delimiter=";",
                comments='#')
            print('loaded all files')
            x_train = dataset[:, 0:input_shape_mlp]
            self.scaler.fit(x_train)  # only the fit is needed; the transformed data was discarded
        self.ready = True
Example #3
def load(directory: str, name: str, has_memory: bool = False):
    active_model = load_model("{}/{}_active.h5f".format(directory, name))
    target_model = load_model("{}/{}_target.h5f".format(directory, name))
    memory = None
    if has_memory:
        with open("{}/{}_memory.obj".format(directory, name), 'rb') as handler:
            memory = pickle.load(handler)
    return active_model, target_model, memory
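A hedged sketch of the save counterpart implied by the naming scheme above (not part of the original example):

def save(directory: str, name: str, active_model, target_model, memory=None):
    # write the two models and, optionally, the replay memory under the same names load() expects
    active_model.save("{}/{}_active.h5f".format(directory, name))
    target_model.save("{}/{}_target.h5f".format(directory, name))
    if memory is not None:
        with open("{}/{}_memory.obj".format(directory, name), 'wb') as handler:
            pickle.dump(memory, handler)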
Example #4
def create_model(args, branch):
    optz = Adam(lr=1e-5)

    if args['dataset'] != Dataset.flood_heights:
        if args['model'] == arguments.Model.dense_net or args[
                'model'] == arguments.Model.attention_guided:
            base_model = DenseNet201(include_top=False, weights='imagenet')
        else:
            base_model = EfficientNetB3(include_top=False, weights='imagenet')

        x = base_model.output
        x = GlobalAveragePooling2D(name="global_average_pooling2d_layer")(x)

        if args['is_binary']:
            print("Binary model.")
            predictions = Dense(1, activation=activations.sigmoid)(x)
            model = Model(inputs=base_model.input, outputs=predictions)
            model.compile(optimizer=optz,
                          loss=losses.binary_crossentropy,
                          metrics=[metrics.binary_accuracy])

        else:
            print("3 number of classes.")
            predictions = Dense(3, activation=activations.softmax)(x)
            model = Model(inputs=base_model.input, outputs=predictions)
            model.compile(optimizer=optz,
                          metrics=[metrics.categorical_accuracy],
                          loss=losses.categorical_crossentropy)

    else:
        if args['model'] == arguments.Model.dense_net or args[
                'model'] == arguments.Model.attention_guided:
            base_model = load_model(
                "/home/jpereira/Tests/weights/" +
                "flood_severity_3_classes_attention_guided_{}_branch_cv/".
                format(branch) + "weights_fold_1_from_10.hdf5")
        else:
            base_model = load_model(
                "/home/jpereira/Tests/weights/" +
                "flood_severity_3_classes_cv/weights_fold_1_from_10.hdf5")

        print("Regression model.")
        output_less_1_m = Dense(1,
                                activation=custom_activation_less_1m,
                                name="less_1m")(base_model.layers[-2].output)
        output_more_1_m = Dense(1,
                                activation=custom_activation_more_1m,
                                name="more_1m")(base_model.layers[-2].output)
        predictions = OutputLayer()(
            [base_model.layers[-1].output, output_less_1_m, output_more_1_m])

        model = Model(inputs=base_model.input,
                      outputs=[base_model.layers[-1].output, predictions])
        model.compile(
            optimizer=optz,
            loss=[losses.categorical_crossentropy, losses.mean_squared_error])

    return model
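custom_activation_less_1m, custom_activation_more_1m and OutputLayer are project-specific and not shown here; purely as a hypothetical illustration, such a bounded regression activation could look like:

from keras import backend as K

def custom_activation_less_1m(x):
    # hypothetical: squash the regression output into the [0, 1] metre range
    return K.sigmoid(x)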
Example #5
def select_model(i, non_image_mean, non_image_std):
    """
    Selects and returns the model to use, based on the cross-validation fold and the settings in the config file.

        INPUT:
            i - the fold of the cross validation
            non_image_mean - mean of the non-image input data
            non_image_std - standard deviation of the non-image input data

        OUTPUT:
            model - the selected model

    """

    # load pre-trained model
    if config.pre_train:
        model = load_model(config.pre_train_model)
    elif config.fully_trained:
        model = load_model(config.fully_trained_model_dir)
    # build new CNN
    else:
        if config.model == "allCNN":
            if config.partially_trained_weights:
                model = build_pretrained_model_allCNN(config.partially_trained_weights_dir, non_image_mean, non_image_std)
            else:
                model = build_model_allCNN(non_image_mean, non_image_std)
        else:
            sys.exit("No valid model selected")

    # freeze the first part of the network
    if config.freeze:
        model.trainable = True
        set_trainable = False
        layer_name = None
        conv_cnt = 0
        # select the layer to freeze up to
        for layer in model.layers:
            if layer.name[:6] == 'conv3d':
                conv_cnt += 1
                # make model trainable from this layer on
                if conv_cnt == config.freeze_until:
                    set_trainable = True
                    layer_name = layer.name
            if set_trainable:
                layer.trainable = True
            else:
                layer.trainable = False

        model.compile(loss=keras.losses.binary_crossentropy, optimizer=optimizers.RMSprop(lr=1e-5),
                      metrics=['accuracy'])
        print(f"\nModel is frozen until layer {config.freeze_until} (with layer name: {layer_name})\n")

    # print model summary for first fold
    if i == 0:
        model.summary()

    if config.pre_train and config.test_only:
        print("\nNo training -> testing only!\n")

    return model
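The freeze-until logic above generalizes to any layer-name prefix; a standalone sketch of the same pattern (freeze_up_to_nth and its arguments are illustrative):

def freeze_up_to_nth(model, prefix, n):
    # freeze every layer before the n-th layer whose name starts with prefix
    set_trainable = False
    seen = 0
    for layer in model.layers:
        if layer.name.startswith(prefix):
            seen += 1
            if seen == n:
                set_trainable = True
        layer.trainable = set_trainable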
Example #6
def load_keras_model():
    model = None
    try:
        model = load_model('../model.h5')
    except (IOError, OSError):
        try:
            model = load_model("model.h5")
        except (IOError, OSError):
            print("Failed to load a saved keras model!")
    # model stays None if both load attempts failed
    return model
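A more defensive variant of the same idea, sketched with the identical candidate paths (the function name is illustrative):

def load_keras_model_from_candidates(paths=('../model.h5', 'model.h5')):
    # try each candidate path in order; fail loudly if none can be loaded
    for path in paths:
        try:
            return load_model(path)
        except (IOError, OSError):
            continue
    raise IOError("Failed to load a saved keras model from: {}".format(paths))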
Example #7
    def load(self, uri):
        """
        Loads the model from the specified URI.
        The model uses 4 files: one for the encoder, other for the decoder, other
        for the autoencoder and one for the class options in JSON format.
        :param uri: base filename
        """
        self._encoder = load_model(uri + "_lstm_encoder.hdf5")
        self._autoencoder = load_model(uri + "_lstm_autoencoder.hdf5")

        pf = PyFolder(os.path.dirname(os.path.realpath(uri)))
        dict_options = pf[os.path.basename(uri) + "_options.json"]

        self._latent_space = dict_options['latent_space']
        self._input_cells = dict_options['input_cells']
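A hedged sketch of the matching save(), writing the four files the docstring names (self._decoder is assumed to exist; json is used directly here instead of PyFolder):

    def save(self, uri):
        import json
        self._encoder.save(uri + "_lstm_encoder.hdf5")
        self._decoder.save(uri + "_lstm_decoder.hdf5")
        self._autoencoder.save(uri + "_lstm_autoencoder.hdf5")
        with open(uri + "_options.json", "w") as options_file:
            json.dump({'latent_space': self._latent_space,
                       'input_cells': self._input_cells}, options_file)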
Example #8
    def __init__(self, master):
        self.master = master
        master.title("Face Creation")

        self.model = load_model('run\\weights\\decoder_VAE_faces_20_dim.h5')

        self.labels = [
            "Age", "Smile", "Eyes", "Roundness", "Teeth", "Gender", "Age",
            "NaN", "Race", "Skin tone", "Warmth", "Head angle", "Age",
            "Hair color", "NaN", "NaN", "Skin tone", "Gender", "NaN", "Light"
        ]

        self.image_size = (256, 256)
        self.image = ImageTk.PhotoImage(
            Image.fromarray(
                np.zeros((self.image_size[0], self.image_size[1], 3),
                         'uint8')))
        self.canvas = Label(master, image=self.image)
        self.canvas.grid(column=0, row=0)

        self.scales = []
        for i in [2, 4]:
            for j in range(10):
                self.label = Label(master,
                                   text=self.labels[j if i == 2 else (j + 10)])
                self.label.grid(column=j + 2, row=i - 1)
                self.scales.append(
                    Scale(master,
                          from_=-50,
                          to=50,
                          orient=VERTICAL,
                          command=self.updateValue))
                self.scales[-1].grid(column=j + 2, row=i)
Example #9
def test():
    paths = [
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/1查询餐厅test',
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/2提供信息test',
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/3询问问题test',
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/4换一个结果test',
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/910欢迎感谢再见test'
    ]
    # tallies for the five intents
    result = []
    lstm_model = load_model('D:\\PyCharm 2019.2\\projects\\lstm_softmax\\output\\word2vec_gru_softmax.h5')
    for path in paths:
        fopen = open(path, 'r', encoding='utf-8')
        testdata = []
        for line in fopen:
            testdata.append(line)
        fopen.close()
        res = lstm_model.predict(input_transform(testdata))
        # count how many test lines fall into each predicted intent
        count = [0, 0, 0, 0, 0]
        for probs in res:
            count[int(np.argmax(probs))] += 1
        result.append(count)

    print(result)

Example #10
def continue_training():
    """Continues training the chesspiece model based on AlexNet."""
    model = load_model("./models/AlexNet.h5")

    train_generator, validation_generator = data_generators(
        preprocess_input, (224, 224), 64)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    callbacks = model_callbacks(20, "./models/AlexNet_2.h5", 0.2, 8)

    history = train_model(model,
                          100,
                          train_generator,
                          validation_generator,
                          callbacks,
                          use_weights=False,
                          workers=5)

    plot_model_history(history, "./models/AlexNet_2_acc.png",
                       "./models/AlexNet_2_loss.png")
    evaluate_model(model, validation_generator)

    model.save("./models/AlexNet_2_last.h5")
Example #11
def getConv2DClassifier(input_shape, num_classes, learning_rate, embedding_size, model_path=None):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(embedding_size, activation='relu', name='dense_embedding'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    if model_path is not None:
        logger.info(" >>> Load pretrained classifier at {} and transfer weights".format(model_path))
        pretrained_model = load_model(model_path)
        pretrained_layers = [l for l in pretrained_model.layers]
        layers = [l for l in model.layers]
        assert (len(pretrained_layers) == len(layers))
        for pre_l, cur_l in zip(pretrained_layers, layers):
            cur_l.set_weights(pre_l.get_weights())
    model.summary()
    return model
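A hypothetical call, assuming MNIST-shaped inputs and a previously trained checkpoint at "mnist_cnn.h5":

clf = getConv2DClassifier(input_shape=(28, 28, 1),
                          num_classes=10,
                          learning_rate=1.0,  # note: unused above, since Adadelta() is built with defaults
                          embedding_size=128,
                          model_path="mnist_cnn.h5")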
Example #12
    def post(self, id):
        dataset = DataSet.query.get(id)
        data = dataset.train_info
        column_info = dataset.column_info
        date_columns = []
        if column_info:
            date_columns = [
                c for c, v in column_info.items() if v.get('is_date', False)
            ]
        csv = ignore(
            open_csv_as_data_frame(dataset.file_path,
                                   date_columns=date_columns),
            data.get('ignore', []))
        n_out_columns = data['output']
        n_in = data['time_series_lag']
        ts = dataset_to_time_series(csv, n_in, 1, n_out_columns)
        (train_X, train_y), (test_X,
                             test_y) = split_data(ts, get_nr_features(csv),
                                                  n_in, 1, n_out_columns)
        model = load_model(dataset.model_path)

        preds = predict(model,
                        test_X,
                        get_nr_features(csv),
                        n_in,
                        with_ground_truth=False)

        destination = upload_file_location(dataset.name + ".h5", 'models/')
        model.save(destination)
        dataset.model_path = destination
        db.session.add(dataset)
        db.session.commit()
        K.clear_session()
        return render_predictions(preds, test_y)
Example #13
def run():
    localpath = os.getcwd()
    (x_train, y_train), (x_test, y_test) = mnist.load_data(localpath + '/mnist/mnist.npz')
    model = load_model(localpath + '/model/SimpleRNNModel.h5')

    # data pre-processing

    x_test = x_test.reshape(-1, 28, 28) / 255.  # normalize
    predict = model.predict_classes(x_test)
    print(predict)
    print(y_test)
    correct_indices = np.nonzero(predict == y_test)[0]
    incorrect_indices = np.nonzero(predict != y_test)[0]

    y_test = np_utils.to_categorical(y_test, num_classes=10)
    loss, accuracy = model.evaluate(x_test, y_test)
    print('test loss', loss)
    print('accuracy', accuracy)

    print("Classified correctly count: {}".format(len(correct_indices)))
    print("Classified incorrectly count: {}".format(len(incorrect_indices)))
Example #14
def load_ann_model():
    """
        Load and return model
        :return:
    """
    model = load_model(_model)
    return model
Example #15
def main():
    _, _, test_dataset = generate_subsets()

    # LOAD MODEL TRAINED (ONLY BEST IS SAVED)
    best_model = load_model("./cnn_model.hdf5")

    predicted_probabilities = best_model.predict_generator(
        generator=test_dataset, verbose=1)

    # create column array with predicted labels
    predicted_labels = (predicted_probabilities >=
                        PREDICTION_THRESHOLD).reshape(-1, )
    true_labels = test_dataset.classes[test_dataset.index_array]

    print(
        pd.DataFrame(
            confusion_matrix(true_labels, predicted_labels),
            index=[["Actual", "Actual"], ["ok", "defect"]],
            columns=[["Predicted", "Predicted"], ["ok", "defect"]],
        ))

    print(classification_report(true_labels, predicted_labels, digits=4))

    test_indexes = test_dataset.index_array
    rng = default_rng()
    random_indexes = rng.choice(len(test_indexes), size=16, replace=False)
    plot_results("random", test_dataset, random_indexes, true_labels,
                 predicted_probabilities)

    misclassified_indexes = np.nonzero(predicted_labels != true_labels)[0]
    plot_results("missed", test_dataset, misclassified_indexes, true_labels,
                 predicted_probabilities)
Example #16
    def run(self):
        from vae import VaeGenerator
        from keras.engine.saving import load_model
        from vae import vae_classifier

        EPOCH_SIZE = 20000
        batch_size = 100

        VALIDATION_SIZE = 3000

        steps_per_epoch = EPOCH_SIZE // batch_size
        validation_steps = VALIDATION_SIZE // batch_size

        mdl = load_model(self.input()[0].path)

        mdl = vae_classifier(mdl)

        train_fold = self.input()[1][0].path
        valid_fold = self.input()[1][1].path
        test_fold = self.input()[1][2].path

        train_gen = VaeGenerator(train_fold, isVAE=False)
        val_gen = VaeGenerator(valid_fold, isVAE=False)

        mdl.fit_generator(train_gen.generator(batch_size),
                          steps_per_epoch=steps_per_epoch,
                          epochs=7,
                          validation_data=val_gen.generator(batch_size),
                          validation_steps=validation_steps,
                          use_multiprocessing=False,
                          workers=1,
                          class_weight=CLASS_WEIGHTS)

        mdl.save(self.output().path)
Example #17
def _load_model(save_dir: Path, init_model: InitModel,
                verbose: int) -> T.Tuple[Model, int, int, float]:
    cur_session = 1
    cur_epoch = 0
    cur_val_loss = np.Inf

    model = None
    if save_dir.exists():
        try:
            session_dir, cur_session = _get_most_recent(save_dir)
            model_file, cur_epoch = _get_most_recent(session_dir)
        except ValueError:
            pass
        else:
            logs_file = model_file.parent / (model_file.stem + '.json')
            logs = json.loads(logs_file.read_text())
            cur_val_loss = logs['val_loss']

            model_filepath = str(model_file)
            if verbose > 1:
                print(
                    f'Resumed training from session: {cur_session}, epoch: {cur_epoch}, val_loss: {cur_val_loss}'
                )
                print(f'Loading model @ {model_filepath!r}...')
            model = load_model(model_filepath)
    else:
        save_dir.mkdir(parents=True)
    if model is None:
        model = init_model()

    return model, cur_session, cur_epoch, cur_val_loss
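_get_most_recent is not shown in this example; a sketch consistent with how it is called above (reusing the Path and typing imports), returning the newest entry and the integer parsed from its name:

def _get_most_recent(directory: Path) -> T.Tuple[Path, int]:
    def index_of(p: Path) -> int:
        digits = ''.join(c for c in p.stem if c.isdigit())
        return int(digits)  # raises ValueError when no digits, caught by the caller
    entries = sorted(directory.iterdir(), key=index_of)
    if not entries:
        raise ValueError("no checkpoints in {}".format(directory))
    return entries[-1], index_of(entries[-1])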
Example #18
def predict(net: str,
            epoch: int,
            working_dir: pathlib.Path,
            test_dir: pathlib.Path,
            batch_size: Optional[int] = None,
            image_size: Optional[Tuple[int, int]] = None) -> None:
    model = load_model(working_dir / "models" / f"{net}-{epoch}.h5")
    out_dir = working_dir / "predictions"

    if not image_size:
        image_size = default_image_size(net)

    test_generator = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
        test_dir,
        class_mode=None,
        color_mode="rgb",
        shuffle=False,
        target_size=image_size,
        **{k: v for k, v in {"batch_size": batch_size}.items() if v})
    preds = np.argmax(model.predict_generator(test_generator,
                                              steps=math.ceil(test_generator.n /
                                                              test_generator.batch_size),
                                              verbose=1),
                      axis=1)

    classes = pd.read_csv(working_dir / "classes.csv", header=0, index_col="id", squeeze=True)
    pred_classes = pd.Series(preds.flatten()).apply(lambda p: classes.loc[p])

    out_dir.mkdir(exist_ok=True, parents=True)
    pred_classes.to_csv(out_dir / f"{net}-{epoch}.csv", header=None, index=False)
Example #19
def load_our_model(model_name):
    return load_model(model_name,
                      custom_objects={
                          "SinCosPositionalEmbedding":
                          SinCosPositionalEmbedding,
                          "AttentionTransformer": AttentionTransformer
                      })
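The custom_objects mapping is what lets Keras deserialize project-specific layers; a self-contained toy version of the pattern (ScaleLayer and the file name are illustrative):

from keras.models import Sequential, load_model
from keras.layers import Dense, Layer

class ScaleLayer(Layer):
    # toy custom layer: multiplies its input by a fixed factor
    def call(self, inputs):
        return inputs * 2.0

model = Sequential([Dense(4, input_shape=(8,)), ScaleLayer()])
model.compile(optimizer='adam', loss='mse')
model.save("toy.h5")
restored = load_model("toy.h5", custom_objects={"ScaleLayer": ScaleLayer})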
Example #20
def test():
    paths = [
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/1查询餐厅test',
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/2提供信息test',
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/3询问问题test',
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/4换一个结果test',
        'D:/PyCharm 2019.2/projects/lstm_softmax/dataset/test/910欢迎感谢再见test'
    ]
    # tallies for the five intents
    result = []
    lstm_model = load_model('D:\\PyCharm 2019.2\\projects\\lstm_softmax\\output\\bert_lstm_softmax.h5')
    for path in paths:
        fopen = open(path, 'r', encoding='utf-8')
        count = [0, 0, 0, 0, 0]
        for line in fopen:
            temp = input_transform([line])
            res = lstm_model.predict(temp)
            # pick the intent with the highest predicted probability
            count[int(np.argmax(res[0]))] += 1
        result.append(count)
        fopen.close()

    print(result)

Example #21
def getClassifier(num_words, EMBEDDING_DIM, embedding_matrix, MAX_SEQUENCE_LENGTH, model_path=None, learning_rate=0.01):
    with tf.device('/cpu:0'):
        embedding_layer = Embedding((num_words + 1), EMBEDDING_DIM, weights=[embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH, trainable=True)
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)

    with tf.device('/device:GPU:0'):
        embedded_sequences = Dropout(0.3)(embedded_sequences)
        # embedded_sequences = SimpleRNN(50, return_sequences=True)(embedded_sequences)
        embedded_sequences = Conv1D(50, 3, activation='relu')(embedded_sequences)
        # doc = AttentionWithContext()(embedded_sequences)
        doc = GlobalAveragePooling1D()(embedded_sequences)
        doc = Dense(2)(doc)
        preds = Activation('softmax')(doc)
    classifier = Model(sequence_input, preds)
    optimizer = optimizers.Adagrad(lr=learning_rate, decay=0.01)
    classifier.compile(loss='categorical_crossentropy',
                       optimizer=optimizer,
                       metrics=['mse', 'accuracy'])

    if model_path is not None:
        logger.info(" >>> Load pretrained classifier at {} and transfer weights".format(model_path))
        pretrained_model = load_model(model_path)
        pretrained_layers = [l for l in pretrained_model.layers if "embedding" not in l.name]
        layers = [l for l in classifier.layers if "embedding" not in l.name]
        assert(len(pretrained_layers) == len(layers))
        for pre_l, cur_l in zip(pretrained_layers, layers):
            cur_l.set_weights(pre_l.get_weights())
    return classifier
Example #22
    def test(self, seed):

        model = load_model('c13.h5')

        seed = seed.lower()
        seed = re.sub(',', ' ', seed)
        seed = word_tokenize(seed)

        seed = seed[0:100]
        seedInd = [self.word_idx[token] for token in seed]

        print("Seed:")
        print("\"", ' '.join([self.idx_word[value] for value in seedInd]),
              "\"")

        for i in range(250):
            x = np.reshape(seedInd, (1, len(seedInd), 1))
            x = x / float(self.num_words)
            prediction = model.predict(x, verbose=0)
            index = self.beamSearch(model, prediction, seedInd, self.num_words)
            #index = np.argmax(prediction)
            result = self.idx_word[index]
            sys.stdout.write(result)
            sys.stdout.write(' ')
            seedInd.append(index)
            seedInd = seedInd[1:]  # slide the window forward one token

        print("\nDone.")
Example #23
def launch_clf(input_matrix, tag_matrix):
    X_train, X_test, y_train, y_test = train_test_split(input_matrix,
                                                        tag_matrix,
                                                        test_size=0.33)

    max_seq_size = 86
    nb_labels = 5000
    entree = Input(shape=(max_seq_size, ), dtype='int32')
    emb = Embedding(len(tag_matrix), 100)(entree)
    bi = LSTM(100, return_sequences=True)(emb)
    # bi = Bidirectional(LSTM(config.hidden, return_sequences=True))(emb)
    # bi = CuDNNLSTM(100, return_sequences=True)(emb)
    drop = Dropout(0.5)(bi)
    out = TimeDistributed(Dense(units=nb_labels, activation='softmax'))(drop)

    model = Model(inputs=entree, outputs=out)
    model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
    # or use loss categorical_crossentropy
    model.fit(X_train,
              y_train,
              validation_data=(X_test, y_test),
              batch_size=16,
              epochs=10)
    model.save(BASE_DIR + 'results/model_cu_10.h5')
    model = load_model(BASE_DIR + 'results/model_cu_10.h5')

    res = model.predict(X_test).argmax(-1)
    # evaluate on the test inputs, not on the model's own predictions
    ev = model.evaluate(X_test, y_test, batch_size=64)

    return ev, res, y_test, X_test
Example #24
def model_test(input_data):
    X_test, _ = test_load_office(input_data)
    model = load_model("model_djt.h5")
    prediction = model.predict(X_test, verbose=1)
    prediction = np.argmax(prediction, axis=1)
    prediction = prediction + 1
    return prediction
Example #25
def visualize_car(img_path):
    # Get car detection result and trained model
    boxes = object_detection_api(img_path)
    model = load_model('model.h5')

    img = io.imread(img_path)
    fig, ax = plt.subplots()
    ax.imshow(img)

    for i in range(len(boxes)):
        # Use bounding boxes to crop patch
        left, top = int(boxes[i][0][0]), int(boxes[i][0][1])
        right, bottom = int(boxes[i][1][0]), int(boxes[i][1][1])

        # Ignore patches that are too small
        width, height = right - left, bottom - top
        if width >= 30 and height >= 30:
            patch = img[top:bottom, left:right]
            # note: cv2.resize takes dsize as (width, height), so this yields shape (53, 97, 3)
            patch = cv2.resize(patch, dsize=(97, 53), interpolation=cv2.INTER_CUBIC)
            patch = patch.reshape((1, 97, 53, 3))

            # Predict viewpoint
            prediction = model.predict_classes(patch)
            dx, dy = ANGLE_DICT[prediction[0]][0], ANGLE_DICT[prediction[0]][1]

            # Visualize car bounding box and angle
            center_x, center_y = int((left + right) / 2), int((top + bottom) / 2)
            rect = plt.Rectangle((left, top), width, height, fill=False, linewidth=2, color='lime')
            plt.arrow(center_x, center_y, dx * 20, -dy * 20, color='w', linewidth=3, head_width=20, head_length=4)
            plt.text(center_x, center_y, ANGLE_LABEL[prediction[0]], fontsize=12, color='lime')
            ax.add_patch(rect)

    plt.show()
Example #26
def continue_training():
    """Continues training the chesspiece model based on SqueezeNet-v1.1.
    """
    model = load_model("./models/SqueezeNet1p1.h5")

    train_generator, validation_generator = data_generators(
        preprocess_input, (227, 227), 64)

    # Train all layers
    for layer in model.layers:
        layer.trainable = True

    model.compile(optimizer='Adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])

    callbacks = model_callbacks(20, "./models/SqueezeNet1p1_all.h5", 0.2, 8)

    history = train_model(model, 100, train_generator, validation_generator,
                          callbacks, use_weights=False, workers=5)

    plot_model_history(history, "./models/SqueezeNet1p1_all_acc.png",
                       "./models/SqueezeNet1p1_all_loss.png")
    evaluate_model(model, validation_generator)

    model.save("./models/SqueezeNet1p1_all_last.h5")
Example #27
def single_results(model_path, weights_path, network_model, label_dict,
                   image_dir, image_cls, resolution, depth, out_file,
                   datasets):
    from keras.engine.saving import load_model
    from .data.image.loader import UCRImageLoader
    from .data.all import load_labeldict
    from .results import SingleResultsParser
    from .network.base import LoadedNetworkModel

    img_rows, img_cols = list(map(int, resolution.split("x")))
    image_cls = image_classes[image_cls]

    label_path = Path(label_dict)

    label_dict = load_labeldict(label_path)
    loader = UCRImageLoader.from_path(image_dir, (img_rows, img_cols), depth,
                                      image_cls)
    if datasets is not None:
        datasets = datasets.split(',')

    if model_path and not weights_path:
        network = LoadedNetworkModel()
        network.model = load_model(str(Path(model_path)))
    elif not model_path and weights_path and network_model:
        network_model_cls = network_models_classes[network_model]
        num_classes = sum(len(d.keys()) for _, d in label_dict.items())
        network = network_model_cls(image_cls, img_rows, img_cols, depth,
                                    num_classes, weights_path)
        network.load_weights(by_name=True)

    res = SingleResultsParser(loader, network, label_dict)
    res.as_csv(datasets, out_file)
Example #28
def main(_):
    with open(os.path.join(FOLDER, "chars.txt"), "r", encoding="utf-8") as char_file:
        chars = char_file.read().splitlines()
    chars.insert(0, "\n")
    char_indexes = pickle.load(open(os.path.join(FOLDER, "char_indexes.pkl"), "rb"))
    index_char = pickle.load(open(os.path.join(FOLDER, "index_char.pkl"), "rb"))
    model = load_model(os.path.join(FOLDER, "model.h5"))

    text = input("Write the text : ")
    text += " "
    for x in range(500):
        text_matrix = np.zeros((1, len(text), len(chars)))
        for _x, _c in enumerate(text):
            text_matrix[0, _x, char_indexes[_c]] = 1
        prob = model.predict(text_matrix)[0]
        prob = np.asarray(prob).astype("float64")
        prob = np.log(prob) / 0.6
        exp_prob = np.exp(prob)
        pred = exp_prob / np.sum(exp_prob)
        sol_list = np.random.multinomial(1, pred, 1)

        _char = index_char[np.argmax(sol_list)]
        text += _char

    print(text)
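The log / divide-by-0.6 / renormalize sequence above is temperature sampling; the same computation factored into a helper (the name and default are illustrative):

def sample_with_temperature(prob, temperature=0.6):
    # rescale the predicted distribution, then draw one index from it
    prob = np.asarray(prob).astype("float64")
    prob = np.log(prob) / temperature
    exp_prob = np.exp(prob)
    pred = exp_prob / np.sum(exp_prob)
    return np.argmax(np.random.multinomial(1, pred, 1))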
Example #29
def classify(imgdb):
    result_imgs = []
    print("classifying " + str(len(imgdb)) + ' images')
    # load the model once instead of reloading it for every image
    model = load_model('models/fcnn_bin_simple.h5',
                       custom_objects={
                           'loss':
                           weighted_categorical_crossentropy(
                               [0.4, 0.5, 0.1]),
                           'IoU':
                           IoU
                       })
    for image in imgdb:
        orgim = np.copy(image)
        # assume the image is binary
        image = img_as_float(gray2rgb(image))
        maskw = int((np.ceil(image.shape[1] / BOXWDITH) * BOXWDITH)) + 1
        maskh = int((np.ceil(image.shape[0] / BOXWDITH) * BOXWDITH))
        mask = np.ones((maskh, maskw, 3))
        mask2 = np.zeros((maskh, maskw, 3))
        mask[0:image.shape[0], 0:image.shape[1]] = image
        for y in tqdm(range(0, mask.shape[0], STRIDE), unit='batch'):
            x = 0
            if (y + BOXWDITH > mask.shape[0]):
                break
            while (x + BOXWDITH) < mask.shape[1]:
                tile = mask[y:y + BOXWDITH, x:x + BOXWDITH]
                std = tile.std() if tile.std() != 0 else 1
                mean = tile.mean()
                mask2[y:y + BOXWDITH, x:x + BOXWDITH] = model.predict(
                    np.array([(tile - mean) / std]))[0]
                x = x + STRIDE
        result_imgs.append(mask2[0:image.shape[0], 0:image.shape[1]])
    return result_imgs
Example #30
def predict(path=None):
    t = time.asctime(time.localtime(time.time()))
    print(t)
    logging.info(t)
    print(path)
    logging.info(path)
    localpath = os.getcwd()
    modelpath = localpath + '/model/ConvolutionModel.h5'
    if not os.path.exists(modelpath):
        log = 'Model file not found; please check whether ' + modelpath + ' exists!'
        print(log)
        logging.warning(log)
        return log
    model = load_model(modelpath)
    im = Image.open(path)
    im.show()
    im = im.convert("L")  # convert to grayscale
    im = np.array(im)  # convert to a matrix
    im = im / 255.  # normalize
    im = 1. - im  # invert
    img = im.reshape(1, -1, 28, 28)
    predict = model.predict_classes(img)
    print(predict[0])
    logging.info('Prediction result: ' + str(predict[0]))
    return predict[0]
Example #31
def main(args):
    if args.cmd == 'train':
        model = create_model()
        train_by_random(model)

    if args.cmd == 'play':
        model = load_model(args.filename)
        model.summary()
        play_against_model(model)

    if args.cmd == 'eval':
        model = load_model(args.filename)
        model.summary()
        boards = ""
        for line in sys.stdin:
            boards += line
        eval_boards(boards, model)