Example #1
    def make_prediction(self):
        K.reset_uids()
        model = ""
        weights = ""
        classes = {
            'TRAIN': ['GRADE 0', 'GRADE 1', 'GRADE 2', 'GRADE 3', 'GRADE 4'],
            'VALIDATION': ['GRADE 0', 'GRADE 1', 'GRADE 2', 'GRADE 3', 'GRADE 4'],
            'TEST': ['GRADE 0', 'GRADE 1', 'GRADE 2', 'GRADE 3', 'GRADE 4'],
        }    
        
        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            with open(model, 'r') as f:
                model = model_from_json(f.read())
                model.load_weights(weights)

        xray_image = image.load_img(self.img, target_size=(224, 224))
        x = image.img_to_array(xray_image)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        model.compile(loss="categorical_crossentropy", metrics=[
                      "accuracy"], optimizer="adam")
        result = model.predict(x)

        pred_name = classes['TRAIN'][np.argmax(result)]

        # Django's messages.success expects the request as its first argument
        # (result is a prediction array here); the f-string interpolates pred_name.
        messages.success(result, f"The osteoarthritis is predicted to be {pred_name}")

        return pred_name
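The CustomObjectScope / model_from_json / load_weights pattern above recurs in most of the examples below. A minimal reusable sketch of it, assuming the same Keras-era APIs (the helper name and paths are illustrative, not from the source):

# CustomObjectScope maps the 'GlorotUniform' name so older Keras versions can
# deserialize architectures saved by newer ones.
from keras.models import model_from_json
from keras.initializers import glorot_uniform
from keras.utils import CustomObjectScope

def load_json_model(model_path, weights_path):  # hypothetical helper
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        with open(model_path, 'r') as f:
            model = model_from_json(f.read())
        model.load_weights(weights_path)
    return model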
Example #2
    def __init__(self, maxlen=85, step=1, batch_size=128):
        """
        :param maxlen: length of each character window fed to the network
        :param step: stride between consecutive windows
        :param batch_size: number of windows per training batch
        """
        # os.chdir('./')

        # learning hyper-parameters
        self.maxlen = maxlen
        self.step = step
        self.batch_size = batch_size

        self.text_all = ''
        self.text_training = ''
        self.text_validation = ''
        self.text_test = ''

        self.chars = None
        self.char_indices = None
        self.indices_char = None

        # self.model = None
        K.reset_uids()
        K.clear_session()

        self.load_dataset()
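The chars / char_indices / indices_char fields above follow the usual char-RNN preprocessing. A minimal sketch of how they and the maxlen/step windows are typically derived (the raw text here is illustrative, not the class's dataset):

text = "hello world, hello keras"
chars = sorted(set(text))                           # character vocabulary
char_indices = {c: i for i, c in enumerate(chars)}  # char -> int
indices_char = {i: c for i, c in enumerate(chars)}  # int -> char

maxlen, step = 5, 1
windows = [text[i:i + maxlen] for i in range(0, len(text) - maxlen, step)]
next_chars = [text[i + maxlen] for i in range(0, len(text) - maxlen, step)]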
Example #3
def loadRNN(model_file_name, weight_file_name):
    K.reset_uids()
    # Load the architecture from the JSON file
    with open(model_file_name + '.json', 'r') as f:
        model = model_from_json(f.read())
    # Load the weights into the new model
    model.load_weights(weight_file_name + '.h5')
    print("Neural network loaded from file")
    return model
Example #4
def running(options):
    fold = 1
    dt = Dataset(options.dataset)
    # load data
    train_datas, train_labels, val_datas, val_labels, test_datas, test_labels \
        = dt.load_data(nevents=options.num_events, nsamples=options.num_samples, fold=fold)
    sequential_test_datas = dt.sequentialize_data(test_datas,
                                                  timestep=options.time_step)
    random.shuffle(sequential_test_datas)

    # build model and load weights
    att_s_beta_vae = AttSBetaVAE(options)
    K.reset_uids()
    sbvae = att_s_beta_vae.build_model(options)
    sbvae.load_weights(options.result_path + sbvae.name + '/fold_' +
                       str(fold) + '_last_weight.h5')

    # get the bottleneck features; the first row of h_out is a dummy
    # placeholder, dropped later via h_out[1:]
    h_out = np.empty((1, 15))
    num_to_plot = 300
    for i in range(options.num_events):
        h_fnc = K.Function([sbvae.input], [sbvae.layers[15 + i].output])
        h = h_fnc([sequential_test_datas[:num_to_plot]])[0]
        h_out = np.concatenate([h_out, h])

    # visualization
    dt.visualization(datas=h_out[1:], name='Att_s_beta_VAE')
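A minimal sketch of the K.Function technique used above to read intermediate activations; the toy model and layer index are hypothetical stand-ins for the AttSBetaVAE bottleneck:

import numpy as np
from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model

inp = Input(shape=(20,))
bottleneck = Dense(15, activation='relu')(inp)  # stand-in for layers[15 + i]
out = Dense(5, activation='softmax')(bottleneck)
toy = Model(inp, out)

# compile a callable from the model input to any tensor in the graph
get_features = K.function([toy.input], [toy.layers[1].output])
features = get_features([np.random.rand(8, 20)])[0]
print(features.shape)  # (8, 15)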
Example #5
def objective(**X):
    print('New configuration: {}'.format(X))

    model = build_custom_model(hidden=X["hidden_layers"],
                               nodes=X["initial_nodes"],
                               lrate=X["learning_rate"],
                               regulator=X["regulator"],
                               pattern=X["node_pattern"],
                               activation=X["activation_function"])
    model.save(modelName)
    model.summary()

    batchSize = str(2**X["batch_power"])
    commandString = "python TMVAClassification_Optimization.py -o {} -b {} -e {} -w {} -y {} -d {}".format(
        outf_key, batchSize, epochs, where, year, dataset)
    os.system(commandString)
    temp_name = dataset + "/temp_file.txt"
    with open(temp_name, "r") as temp_file:
        ROC = float(temp_file.read())

    # Reset the session
    del model
    backend.clear_session()
    backend.reset_uids()

    # Record the optimization iteration
    logFile.write('{:7}, {:7}, {:7}, {:7}, {:9}, {:14}, {:10}, {:7}\n'.format(
        str(X["hidden_layers"]), str(X["initial_nodes"]),
        str(np.around(X["learning_rate"], 5)), str(batchSize),
        str(X["node_pattern"]), str(X["regulator"]),
        str(X["activation_function"]), str(np.around(ROC, 5))))
    opt_metric = (1.0 - ROC)
    print("Optimization metric value obtained = {:.5f}".format(opt_metric))
    os.system("rm dataset/temp_file.txt")
    return opt_metric  # since the optimizer tries to minimize this function and we want a larger ROC value
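The del model / clear_session / reset_uids triple above is what keeps repeated optimization trials independent. A minimal sketch of the same cleanup pattern in a plain loop (build_and_score is a hypothetical callable, not the objective above):

from keras import backend

def run_trials(build_and_score, configs):
    """Score each config with a fresh backend state between trials."""
    scores = []
    for config in configs:
        scores.append(build_and_score(config))
        backend.clear_session()  # drop the old graph
        backend.reset_uids()     # restart the layer-name counters
    return scores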
Example #6
    def predict(self):

        K.reset_uids()

        classes = ['Normal', 'Neumonia']

        modelo = 'neumonia/model/model_neumonia_v41.json'
        pesos = 'neumonia/model/weights_neumonia_v41.h5'

        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            with open(modelo, 'r') as f:
                model = model_from_json(f.read())
                model.load_weights(pesos)

        img = image.load_img(self.img, target_size=(150, 150))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        resultado = preds[0]  # only one dimension

        porcentaje = np.round(resultado * 100, 2)
        porcentaje = list(porcentaje)

        respuesta = np.argmax(resultado)  # index of the highest value in resultado

        for i in range(len(classes)):
            res = classes[i]

            if i == respuesta:
                return 'Result: {:.4}% {}'.format(round(max(porcentaje), 2),
                                                  res)
Example #7
def cargar_rnn(nombreArchivoModelo, nombreArchivoPesos):
    k.reset_uids()
    # Load the architecture from the JSON file
    with open(nombreArchivoModelo + '.json', 'r') as f:
        model = model_from_json(f.read())
    # Load the weights into the new model
    model.load_weights(nombreArchivoPesos + '.h5')
    return model
Example #8
def cargarRNN(nombreArchivoModelo, nombreArchivoPesos):
    K.reset_uids()
    # Load the architecture from the JSON file
    with open(nombreArchivoModelo + '.json', 'r') as f:
        model = model_from_json(f.read())
    # Load the weights into the new model
    model.load_weights(nombreArchivoPesos + '.h5')
    print("Neural network loaded from file")
    return model
Example #9
    def __init__(self, image_size, B, n_classes, is_learning_phase=False):
        K.set_learning_phase(int(is_learning_phase))
        K.reset_uids()

        self.image_size = image_size
        self.n_cells = self.image_size // 32  # one grid cell per 32-pixel stride
        self.B = B                            # boxes per grid cell (YOLO-style)
        self.n_classes = n_classes

        self.m = self.buildModel()
Example #10
def cargar_modelo(url_modelo, url_pesos):
    k.reset_uids()
    with open(url_modelo, 'r') as f:
        print('TRYING TO READ <<___ ' * 5)
        model = model_from_json(f.read())
        print('FINISHED READING <----' * 10)
    # Load the weights into the new model
    model.load_weights(url_pesos)
    print("Neural network loaded from file")
    return model
Example #11
def running(options):
    dt = Dataset(options.dataset)
    folds = options.nfolds
    # 1.First construct polyphonic datasets by mixing single event sound, and extract MFCCs features.
    if options.mix_data:
        dt.mix_data(nevents=options.num_events, nsamples=options.num_samples)
    f1_list, er_list, fold_list = [], [], []

    att_s_beta_vae = AttSBetaVAE(options)
    for k in range(1, folds + 1):
        # 2.Load data.
        train_datas, train_labels, test_datas, test_labels \
            = dt.load_data(nevents=options.num_events, nsamples=options.num_samples, fold=k)
        sequential_train_datas = dt.sequentialize_data(
            train_datas, timestep=options.time_step)
        sequential_test_datas = dt.sequentialize_data(
            test_datas, timestep=options.time_step)

        # 3.Create attention-based supervised beta-VAE model and train it.
        K.reset_uids()
        model = att_s_beta_vae.build_model(options)
        att_s_beta_vae.train_model(model,
                                   x_train=sequential_train_datas,
                                   y_train=train_labels,
                                   fold=k)

        # 4.Evaluate the performance on F1 and ER
        # Param: 'supervised' defaults to False; use True for the supervised beta-VAE and False otherwise.
        # This function evaluates the segment-based F1 score and ER.
        f1_score, error_rate = att_s_beta_vae.metric_model(
            model,
            sequential_test_datas,
            test_labels,
            supervised=True,
            new_weight_path='fold_' + str(k) + '_last_weight.h5')

        print(
            'Fold {fold}, nevents {nevents}, nsamples {nsamples} ==> error_rate: {error_rate}, f1_score: {f1_score}'
            .format(fold=k,
                    nevents=options.num_events,
                    nsamples=options.num_samples,
                    error_rate=error_rate,
                    f1_score=f1_score))
        f1_list.append(f1_score)
        er_list.append(error_rate)
        fold_list.append(k)
        del model
    f1_list.append(np.mean(f1_list))
    er_list.append(np.mean(er_list))
    fold_list.append('AVER')
    result_df = pd.DataFrame({'F1': f1_list, 'ER': er_list}, index=fold_list)
    result_df.to_csv(options.result_path + options.name + '_' +
                     str(options.num_events) + '/K_Folds_results.csv')
    return result_df
Example #12
def cargar_rnn(nombreArchivoModelo, nombreArchivoPesos):
    print('LOAD PROCESS STARTS <---------------------------------------------------')
    k.reset_uids()
    # Load the architecture from the JSON file
    with open(nombreArchivoModelo + '.json', 'r') as f:
        print('TRYING TO READ <<___ ' * 5)
        model = model_from_json(f.read())
        print('FINISHED READING <----' * 10)
    # Load the weights into the new model
    model.load_weights(nombreArchivoPesos + '.h5')
    print("Neural network loaded from file")
    return model
Example #13
    def train(self, data, verbose=True):
        """ Train all models and return the best one.

        Models are evaluated and ranked according to their ROC-AUC on a validation data set.

        Parameters
        ----------
        data: pysster.Data
            A Data object providing training and validation data sets.
        
        verbose: bool
            If True, progress information (train/val loss) will be printed throughout the training.

        Returns
        -------
        results: tuple(pysster.Model, str)
            The best performing model and an overview table of all models are returned.
        """
        best_model_path = "{}/{}".format(
            gettempdir(),
            ''.join(random.choice(string.ascii_uppercase) for _ in range(20)))
        aucs = []
        max_auroc = -1
        for i, candidate in enumerate(self.candidates):
            model = Model(candidate, data)
            model.train(data, verbose)
            predictions = model.predict(data, "val")
            labels = data.get_labels("val")
            report = utils.performance_report(labels, predictions)
            # support-weighted average over classes: the report's last column
            # holds the class support; column 3 is the per-class ROC-AUC
            roc_auc = np.sum(report[:, 0:-1] * report[:, -1, np.newaxis],
                             axis=0)
            roc_auc = (roc_auc / np.sum(report[:, -1]))[3]
            aucs.append(roc_auc)
            if aucs[-1] > max_auroc:
                max_auroc = aucs[-1]
                utils.save_model(model, best_model_path)
            K.clear_session()
            K.reset_uids()
            if not verbose: continue
            print("\n=== Summary ===")
            print("Model {}/{} = {:.5f} weighted avg roc-auc".format(
                i + 1, len(self.candidates), aucs[i]))
            for param in candidate:
                if param not in ["input_shape"]:
                    print(" - {}: {}".format(param, candidate[param]))
        # load the best model (and remove it from disc)
        model = utils.load_model(best_model_path)
        remove(best_model_path)
        remove("{}.h5".format(best_model_path))
        # save a formatted summary of all trained models
        table = self._grid_search_table(aucs)
        return model, table
Example #14
    def predict(self):
    
        K.reset_uids()
        
        ixtoword = load(open("pickle_files/ixtoword.pkl", "rb"))
        wordtoix = load(open("pickle_files/wordtoix.pkl", "rb"))
        
        inc_model = InceptionV3(weights='imagenet')
        model_new = Model(inc_model.input, inc_model.layers[-2].output)
        
        # model = loadModel("final_model")
        model_file_name = "final_model"
        weights_file_name = None
        if weights_file_name is None:
            weights_file_name = model_file_name
        # load json and create model
        json_file = open('model_weights/{}.json'.format(model_file_name), 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        # load weights into the new model; the second call immediately
        # overrides the first with the model_7 checkpoint
        model.load_weights("model_weights/{}.h5".format(weights_file_name))
        model.load_weights('./model_weights/{}'.format("model_7.h5"))

        # Preprocess
        img = image.load_img(self.media, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        # Imagenet Feature Vector
        fea_vec = model_new.predict(x) 
        fea_vec = np.reshape(fea_vec, (1,2048))

        # Getting Caption
        in_text = 'startseq'
        max_length = 34
        for i in range(max_length):
            sequence = [wordtoix[w] for w in in_text.split() if w in wordtoix]
            sequence = pad_sequences([sequence], maxlen=max_length)
            yhat = model.predict([fea_vec,sequence], verbose=0)
            yhat = np.argmax(yhat)
            word = ixtoword[yhat]
            in_text += ' ' + word
            if word == 'endseq':
                break
        final = in_text.split()
        final = final[1:-1]
        final = ' '.join(final)
        return final
Example #15
def objectDetect():
    K.reset_uids()
    tb._SYMBOLIC_SCOPE.value = True
    print("called")
    img_path = os.path.join(settings.MEDIA_ROOT, "hello.jpeg")
    img = load_img(img_path, target_size=(197, 197))
    x = img_to_array(img)
    x = x.reshape(1, 197, 197, 3).astype('float')
    ans = model.predict(x)
    print(ans)
    # return the first class scoring at least 0.8 (1-indexed); -1 if none does
    for i in range(3):
        if ans[0][i] >= 0.8:
            return i + 1
    return -1
Example #16
    def _modify_graph(self, name):
        g = tf.get_default_graph()
        with g.gradient_override_map({'Relu': name}):

            # get layers that have an activation
            layer_dict = [
                layer for layer in self.model.layers[1:]
                if hasattr(layer, 'activation')
            ]

            # replace relu activation
            for layer in layer_dict:
                if layer.activation == keras.activations.relu:
                    layer.activation = tf.nn.relu

            # re-instantiate a new model so the gradient override takes effect
            K.reset_uids()
            new_model = self.model_func(weights='imagenet')
        return new_model
Example #17
def reset_layer_names(args):
    '''In case of transfer learning, it's important that the names of the weights match
    between the different networks (e.g. 2X and 4X). This function loads the lower-level
    SR network from a reset keras session (thus forcing names to start from naming index 0),
    loads the weights onto that network, and saves the weights again with proper names'''

    # Find lower-upscaling model results
    BASE = os.path.join(args.weight_path,
                        args.modelname + '_' + str(args.scaleFrom) + 'X.h5')
    assert os.path.isfile(BASE), 'Could not find ' + BASE

    # Load previous model with weights, and re-save weights so that name ordering will match new model
    prev_model = ESPCN(upscaling_factor=args.scaleFrom, channels=args.channels)
    prev_model.load_weights(BASE)
    prev_model.save_weights(args.weight_path + args.modelname)

    #del prev_model
    K.reset_uids()
    gc.collect()
    return BASE
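Why the reset matters for weight names in this and the next example: Keras auto-names layers from global UID counters, so rebuilding a network in the same session shifts every name and load_weights can no longer match by name. A minimal standalone sketch, assuming Keras 2.x on the TensorFlow 1.x backend:

from keras import backend as K
from keras.layers import Dense
from keras.models import Sequential

def build():
    return Sequential([Dense(8, input_shape=(4,)), Dense(2)])

print([l.name for l in build().layers])  # ['dense_1', 'dense_2']
print([l.name for l in build().layers])  # ['dense_3', 'dense_4'] -- names drifted

K.clear_session()  # fresh graph
K.reset_uids()     # fresh naming counters
print([l.name for l in build().layers])  # ['dense_1', 'dense_2'] again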
Example #18
def reset_layer_names(args):
    '''In case of transfer learning, it's important that the names of the weights match
    between the different networks (e.g. 2X and 4X). This function loads the lower-level
    SR network from a reset keras session (thus forcing names to start from naming index 0),
    loads the weights onto that network, and saves the weights again with proper names'''

    # Find lower-upscaling model results
    BASE_G = os.path.join(args.weight_path, 'SRGAN_'+args.dataname+'_generator_'+str(args.scaleFrom)+'X.h5')
    BASE_D = os.path.join(args.weight_path, 'SRGAN_'+args.dataname+'_discriminator_'+str(args.scaleFrom)+'X.h5')
    assert os.path.isfile(BASE_G), 'Could not find '+BASE_G
    assert os.path.isfile(BASE_D), 'Could not find '+BASE_D
    
    # Load previous model with weights, and re-save weights so that name ordering will match new model
    prev_gan = SRGAN(upscaling_factor=args.scaleFrom)
    prev_gan.load_weights(BASE_G, BASE_D)
    prev_gan.save_weights(args.weight_path+'SRGAN_'+args.dataname)
    del prev_gan
    K.reset_uids()
    gc.collect()
    return BASE_G, BASE_D
Example #19
    def predict(self):
        K.reset_uids()

        model = 'face_classifier/model/short_arc_model_arc.json'
        weights = 'face_classifier/model/short_arc_model_weights.h5'

        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            with open(model, 'r') as f:
                model = model_from_json(f.read())
                model.load_weights(weights)

        img = image.load_img(self.img, target_size=(64, 64), grayscale=True)
        x = image.img_to_array(img)
        x = x/255.0
        x = np.expand_dims(x, axis=0)
        y_prob = model.predict(x)
        person_profile_id = y_prob.argmax(axis=-1)
        with open('face_classifier/model/map_person.json', 'r') as f:
            mapper = json.load(f)

        return mapper['people'][str(person_profile_id[0])]
Example #20
    def classify(self):
        K.reset_uids()  # reset graph identifiers
        model = VGG16(weights='imagenet',
                      include_top=True)  # use keras pretrained model VGG16

        img = image.load_img(
            '/home/grh/Desktop/webProgramming/django-apps/imageInterpretation/media/cat.jpeg',
            target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        result_features = model.predict(x)
        result0 = imagenet_utils.decode_predictions(result_features)
        result = ""
        #result['content']=""

        for (i, (predID, pred, probability)) in enumerate(result0[0]):
            result = result + "\n" + "{}.-  {}: {:.2f}%".format(
                i + 1, pred, probability * 100)
            #result['content'] = result['content'] + "\n" + "{}.-  {}: {:.2f}%".format(i+1, pred, probability*100)
        return result
Example #21
    def predict(self):
    
        K.reset_uids()
        
        model = 'cnn/model/model_mobilenet.json'
        weights = 'cnn/model/weights_mobilenet.h5'

        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            with open(model, 'r') as f:
                model = model_from_json(f.read())
                model.load_weights(weights)

        img = image.load_img(self.img, target_size=(224,224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0) 
        x = preprocess_input(x)
        result = model.predict(x)
        result_decode = imagenet_utils.decode_predictions(result)

        # decode_predictions sorts by probability, so this returns only the
        # top-1 prediction (the loop exits on its first iteration)
        for (i, (predId, pred, prob)) in enumerate(result_decode[0]):
            return "{}.-  {}: {:.2f}%".format(i + 1, pred, prob * 100)
Example #22
    def predict(self):

        K.reset_uids()

        model = 'cnn_counting_leaf/model/model.json'
        weights = 'cnn_counting_leaf/model/weights_model.h5'

        with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
            with open(model, 'r') as f:
                model = model_from_json(f.read())
                model.load_weights(weights)

        img = image.load_img(self.img, target_size=(299, 299))

        img = image.img_to_array(img)
        img /= 255
        img = np.expand_dims(img, axis=0)

        result = model.predict(img)

        return "{}".format(int(result.flatten()[0]))
Example #23
def running(options):
    dt = Dataset(options.dataset)
    # 1.First construct polyphonic datasets by mixing single event sound, and extract MFCCs features.
    if options.mix_data:
        dt.mix_data(nevents=options.num_events,
                    nsamples=options.num_samples,
                    isUnbalanced=True)

    f1_list, er_list, fold_list = [], [], []

    att_s_beta_vae = AttSBetaVAE(options)
    # 2.Load data.
    train_datas, train_labels, test_datas, test_labels \
        = dt.load_data(nevents=options.num_events, nsamples=options.num_samples, fold=1, isUnbalanced=True)
    sequential_train_datas = dt.sequentialize_data(train_datas,
                                                   timestep=options.time_step)
    sequential_test_datas = dt.sequentialize_data(test_datas,
                                                  timestep=options.time_step)

    # 3.Create attention-based supervised beta-VAE model and train it with the unbalanced data.
    K.reset_uids()
    model = att_s_beta_vae.build_model(options)
    att_s_beta_vae.train_model(model,
                               x_train=sequential_train_datas,
                               y_train=train_labels,
                               fold=1,
                               new_weights=options.result_path + model.name +
                               '/fold_1_last_weight_DA.h5')

    # 4.Evaluate the performance on F1 and ER
    # Param: 'supervised' defaults to False; use True for the supervised beta-VAE and False otherwise.
    # This function evaluates the segment-based F1 score and ER.
    f1_score, error_rate = att_s_beta_vae.metric_model(
        model,
        sequential_test_datas,
        test_labels,
        supervised=True,
        new_weight_path='fold_1_last_weight_DA.h5')

    print(
        'Before data augmentation '
        '>>> nevents {nevents}, nsamples {nsamples} ==> error_rate: {error_rate}, f1_score: {f1_score}'
        .format(nevents=options.num_events,
                nsamples=options.num_samples,
                error_rate=error_rate,
                f1_score=f1_score))

    # 5.Define the function that extracts z^*; here the under-represented category is the first event.
    z_star_fnc = K.Function([model.input], [model.layers[14].output])
    # x holds the raw input features extracted for the first category
    x = []
    index = []
    y_addition = []
    for idx in range(len(train_labels)):
        y = train_labels[idx]
        if (y == [1, 0, 0, 0, 0]).all():
            index.append(idx)
            x.append(sequential_train_datas[idx])
            y_addition.append(y)
    z_star = z_star_fnc([x])[0]
    # 6.define the decoder
    decoder_fnc = K.Function([model.layers[6].output], [model.output[0]])
    generated_data = decoder_fnc([z_star])[0]

    # 7.augment the training set with generated data
    sequential_train_datas = np.concatenate(
        [sequential_train_datas, generated_data])
    train_labels = np.concatenate([train_labels, y_addition])

    # 8.retrain the model
    K.reset_uids()
    model = att_s_beta_vae.build_model(options)
    att_s_beta_vae.train_model(model,
                               x_train=sequential_train_datas,
                               y_train=train_labels,
                               fold=1,
                               new_weights=options.result_path + model.name +
                               '/fold_1_last_weight_DA_after.h5')
    f1_score_DA, error_rate_DA = att_s_beta_vae.metric_model(
        model,
        sequential_test_datas,
        test_labels,
        supervised=True,
        new_weight_path='fold_1_last_weight_DA_after.h5')

    print(
        'nevents {nevents}, nsamples {nsamples} ==> error_rate: {error_rate}, f1_score: {f1_score}'
        .format(nevents=options.num_events,
                nsamples=options.num_samples,
                error_rate=error_rate_DA,
                f1_score=f1_score_DA))
    # record the post-augmentation scores so the lists match the index length
    f1_list.append(f1_score_DA)
    er_list.append(error_rate_DA)
    fold_list.append('AVER')
    result_df = pd.DataFrame({'F1': f1_list, 'ER': er_list}, index=fold_list)
    result_df.to_csv(options.result_path + options.name + '_' +
                     str(options.num_events) + '/K_Folds_results.csv')
    return result_df
Example #24
    def train(self, data, pr_auc=False, verbose=True):
        """ Train all models and return the best one.

        Models are evaluated and ranked according to their ROC-AUC or PR-AUC (precision-recall)
        on a validation data set.

        Parameters
        ----------
        data: pysster.Data
            A Data object providing training and validation data sets.
        
        pr_auc: bool
            If True, the area under the precision-recall curve will be maximized instead of the area under the ROC curve

        verbose: bool
            If True, progress information (train/val loss) will be printed throughout the training.

        Returns
        -------
        results: tuple(pysster.Model, str)
            The best performing model and an overview table of all models are returned.
        """
        best_model_path = "{}/{}".format(
            gettempdir(),
            ''.join(random.choice(string.ascii_uppercase) for _ in range(20)))
        if pr_auc:
            metric_idx = 4
            metric_name = "pr-auc"
        else:
            metric_idx = 3
            metric_name = "roc-auc"
        metric = []
        max_metric = -1
        for i, candidate in enumerate(self.candidates):
            model = Model(candidate, data)
            model.train(data, verbose)
            predictions = model.predict(data, "val")
            labels = data.get_labels("val")
            report = utils.performance_report(labels, predictions)
            # support-weighted average over classes: the report's last column
            # holds the class support; metric_idx selects roc-auc or pr-auc
            metric_val = np.sum(report[:, 0:-1] * report[:, -1, np.newaxis],
                                axis=0)
            metric_val = (metric_val / np.sum(report[:, -1]))[metric_idx]
            metric.append(metric_val)
            if metric[-1] > max_metric:
                max_metric = metric[-1]
                utils.save_model(model, best_model_path)
            K.clear_session()
            K.reset_uids()
            if not verbose: continue
            print("\n=== Summary ===")
            print("Model {}/{} = {:.5f} weighted avg {}".format(
                i + 1, len(self.candidates), metric[i], metric_name))
            for param in candidate:
                if param not in ["input_shape"]:
                    print(" - {}: {}".format(param, candidate[param]))
        # load the best model (and remove it from disc)
        model = utils.load_model(best_model_path)
        remove(best_model_path)
        remove("{}.h5".format(best_model_path))
        # save a formatted summary of all trained models
        table = self._grid_search_table(metric, metric_name)
        return model, table
Example #25
 def wrapped(*args, **kwargs):
     K.clear_session()
     K.reset_uids()
     return func(*args, **kwargs)
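This wrapped closure is the inner half of a decorator whose enclosing definition is not shown. A plausible reconstruction (the decorator name is hypothetical):

from functools import wraps
from keras import backend as K

def with_fresh_session(func):  # hypothetical name; not in the source
    """Clear the Keras session and UID counters before each call."""
    @wraps(func)
    def wrapped(*args, **kwargs):
        K.clear_session()
        K.reset_uids()
        return func(*args, **kwargs)
    return wrapped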
Example #26
  model.add(Conv1D(filters=40, kernel_size=6, padding='same', activation='relu'))
  model.add(Conv1D(filters=50, kernel_size=5, padding='same', activation='relu'))
  model.add(Conv1D(filters=50, kernel_size=5, padding='same', activation='relu'))
  model.add(Dense(1024, activation='relu'))
  #model.add(GlobalAveragePooling1D())
  model.add(Dense(1))
  # try using different optimizers and different optimizer configs
  # Compile Model
  print('Compile model...')
  model.compile(loss='mean_squared_error',
                optimizer='rmsprop',
                metrics=['mean_absolute_error', disag_error])
  # Train model ...
  early_stopping = EarlyStopping(monitor='val_loss', patience=2)
  print('Train...')
  model.fit(input_train, output_train,
            batch_size=batch_size,
            epochs=epochs,
            validation_split=0.2,
            callbacks=[early_stopping])

  model.save('convmodel/convz_b{}.h5'.format(i))
  gc.collect()
  K.reset_uids()
  K.clear_session()

  
  


Example #27
import numpy as np
import streamlit as st
from keras import backend as keras_backend
from keras.applications.mobilenet import preprocess_input
from keras.applications import imagenet_utils
from keras.preprocessing import image
from keras.utils import CustomObjectScope
from tensorflow.keras.models import model_from_json
from tensorflow.keras.initializers import glorot_uniform

keras_backend.reset_uids()

model = "app/model/model_json.json"
weights = "app/model/mobilenet_imagenet.h5"

with CustomObjectScope({"GlorotUniform": glorot_uniform()}):
    with open(model, "r") as file:
        model = model_from_json(file.read())
        model.load_weights(weights)


@st.cache
def predict(img):
    img = image.load_img(img, target_size=(224, 224))

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    result = model.predict(x)
    result_decode = imagenet_utils.decode_predictions(result)