Example #1
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #2
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #3
def test_constant_initializer_with_numpy():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,),
                    kernel_initializer=Constant(np.ones((3, 2)))))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    json_str = model.to_json()
    model_from_json(json_str).summary()

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str).summary()
Example #4
    def _transform(self, df):
        '''
        Private transform method of a Transformer. This serves as batch-prediction method for our purposes.
        '''
        outputCol = self.getOutputCol()
        labelCol = self.getLabelCol()
        new_schema = df.schema
        new_schema.add(StructField(outputCol, StringType(), True))

        rdd = df.rdd.coalesce(1)
        features = np.asarray(rdd.map(lambda x: from_vector(x.features)).collect())
        # Note that we collect, since executing this on the rdd would require model serialization once again
        model = model_from_yaml(self.get_keras_model_config())
        model.set_weights(self.weights.value)
        predictions = rdd.ctx.parallelize(model.predict_classes(features)).coalesce(1)
        predictions = predictions.map(lambda x: (str(x),))  # one 1-tuple per prediction; tuple(str(x)) would split multi-digit labels into characters

        results_rdd = rdd.zip(predictions).map(lambda x: x[0] + x[1])
        # TODO: Zipping like this is very likely wrong
        # results_rdd = rdd.zip(predictions).map(lambda pair: Row(features=to_vector(pair[0].features),
        #                                        label=pair[0].label, prediction=float(pair[1])))
        results_df = df.sql_ctx.createDataFrame(results_rdd, new_schema)
        results_df = results_df.withColumn(outputCol, results_df[outputCol].cast(DoubleType()))
        results_df = results_df.withColumn(labelCol, results_df[labelCol].cast(DoubleType()))

        return results_df
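The TODO above flags that zipping rdd with predictions only lines up if both RDDs have identical partitioning and element counts. A hedged sketch of a more defensive alignment, pairing both RDDs with explicit indices and joining on them (an illustration, not the project's actual fix):

        # pair each row and each prediction with a stable index, then join,
        # so alignment no longer depends on identical partitioning
        indexed_rows = rdd.zipWithIndex().map(lambda pair: (pair[1], pair[0]))
        indexed_preds = predictions.zipWithIndex().map(lambda pair: (pair[1], pair[0]))
        results_rdd = (indexed_rows.join(indexed_preds)
                       .sortByKey()
                       .map(lambda kv: tuple(kv[1][0]) + kv[1][1]))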
Example #5
    def _fit(self, df):
        '''
        Private fit method of the Estimator, which trains the model.
        '''
        simple_rdd = df_to_simple_rdd(df, categorical=self.get_categorical_labels(), nb_classes=self.get_nb_classes(),
                                      featuresCol=self.getFeaturesCol(), labelCol=self.getLabelCol())
        simple_rdd = simple_rdd.repartition(self.get_num_workers())
        optimizer = None
        loss = None

        if self.get_optimizer_config() is not None:
            optimizer = get(self.get_optimizer_config()['name'], self.get_optimizer_config())

        # This code can raise KeyError: Param(parent='ElephasEstimator_470d82b85b77952bfaa0', name='loss_config', doc='Serialized Elephas loss properties')
        # and still needs a proper fix.
        if self.get_loss_config() is not None:
            # import keras.objectives
            # loss = keras.objectives.get(self.get_loss_config()['name'], self.get_loss_config())
            loss = self.get_loss_config()

        keras_model = model_from_yaml(self.get_keras_model_config())
        # The SparkModel class takes two more options here: loss and optimizer
        spark_model = SparkModel(simple_rdd.ctx, keras_model, optimizer=optimizer, loss=loss,
                                 mode=self.get_mode(), frequency=self.get_frequency(),
                                 num_workers=self.get_num_workers())
        spark_model.train(simple_rdd, nb_epoch=self.get_nb_epoch(), batch_size=self.get_batch_size(),
                          verbose=self.get_verbosity(), validation_split=self.get_validation_split())

        model_weights = spark_model.master_network.get_weights()
        weights = simple_rdd.ctx.broadcast(model_weights)
        return ElephasTransformer(labelCol=self.getLabelCol(),
                                  outputCol='prediction',  # TODO: Set default value
                                  keras_model_config=spark_model.master_network.to_yaml(),
                                  weights=weights)
Example #6
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #7
    def train(self, data_iterator):
        '''
        Train a keras model on a worker and send asynchronous updates
        to parameter server
        '''
        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        if x_train.size == 0:
            return
        model = model_from_yaml(self.yaml)
        print(self.optimizer)
        print(self.loss)
        model.compile(optimizer=self.optimizer, loss=self.loss)

        nb_epoch = self.train_config['nb_epoch']
        batch_size = self.train_config.get('batch_size')
        nb_train_sample = x_train.shape[0]
        nb_batch = int(np.ceil(nb_train_sample/float(batch_size)))
        index_array = np.arange(nb_train_sample)
        batches = [(i*batch_size, min(nb_train_sample, (i+1)*batch_size)) for i in range(0, nb_batch)]

        if self.frequency == 'epoch':
            for epoch in range(nb_epoch):
                weights_before_training = get_server_weights(self.master_url)
                model.set_weights(weights_before_training)
                self.train_config['nb_epoch'] = 1
                if x_train.shape[0] > batch_size:
                    model.fit(x_train, y_train, show_accuracy=True, **self.train_config)
                weights_after_training = model.get_weights()
                deltas = subtract_params(weights_before_training, weights_after_training)
                put_deltas_to_server(deltas, self.master_url)
        elif self.frequency == 'batch':
            for epoch in range(nb_epoch):
                if x_train.shape[0] > batch_size:
                    for (batch_start, batch_end) in batches:
                        weights_before_training = get_server_weights(self.master_url)

                        # Sometimes the types of weights_before_training and what
                        # model.set_weights(params) expects do not match: the fetched value
                        # can come back as a scalar while model.set_weights() needs numpy
                        # arrays, and occasionally the fetch does not work at all.

                        if len(weights_before_training) > 0:
                            model.set_weights(weights_before_training)

                        batch_ids = index_array[batch_start:batch_end]
                        X = slice_X(x_train, batch_ids)
                        y = slice_X(y_train, batch_ids)
                        model.train_on_batch(X, y)
                        weights_after_training = model.get_weights()
                        if len(weights_before_training) == len(weights_after_training):
                            deltas = subtract_params(weights_before_training, weights_after_training)
                        else:
                            deltas = weights_after_training
                        print(len(deltas))
                        put_deltas_to_server(deltas, self.master_url)
        else:
            print('Choose frequency to be either batch or epoch')
        yield []
Example #8
def test_recursive():
    # test layer-like API
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)

    # test serialization
    config = seq.get_config()
    new_graph = Sequential.from_config(config)

    seq.summary()
    json_str = seq.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = seq.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #9
def test_siamese_1():
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_input(name='input2', input_shape=(32,))

    graph.add_shared_node(Dense(4), name='shared', inputs=['input1', 'input2'], merge_mode='sum')
    graph.add_node(Dense(4), name='dense1', input='shared')
    # graph.add_node(Dense(4), name='output1', input='shared', create_output=True)

    # graph.add_output(name='output1', inputs=['dense1', 'shared'], merge_mode='sum')
    graph.add_output(name='output1', input='dense1')
    graph.compile('rmsprop', {'output1': 'mse'})

    graph.fit({'input1': X_train_graph, 'input2': X2_train_graph, 'output1': y_train_graph},
              nb_epoch=10)
    out = graph.predict({'input1': X_test_graph, 'input2': X2_test_graph})
    assert isinstance(out, dict)
    assert(len(out) == 1)

    loss = graph.test_on_batch({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    loss = graph.train_on_batch({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    loss = graph.evaluate({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    assert(loss < 5.0)

    # test serialization
    config = graph.get_config()
    new_graph = Graph.from_config(config)

    graph.summary()
    json_str = graph.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = graph.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #10
def test_1o_2i():
    # test a non-sequential graph with 2 inputs and 1 output
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_input(name='input2', input_shape=(32,))

    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input2')
    graph.add_node(Dense(4), name='dense3', input='dense1')

    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')
    graph.compile('rmsprop', {'output1': 'mse'})

    graph.fit({'input1': X_train_graph, 'input2': X2_train_graph, 'output1': y_train_graph},
              nb_epoch=2)
    out = graph.predict({'input1': X_test_graph, 'input2': X2_test_graph})
    assert isinstance(out, dict)
    assert(len(out) == 1)

    loss = graph.test_on_batch({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    loss = graph.train_on_batch({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})
    loss = graph.evaluate({'input1': X_test_graph, 'input2': X2_test_graph, 'output1': y_test_graph})

    # test serialization
    config = graph.get_config()
    new_graph = Graph.from_config(config)

    graph.summary()
    json_str = graph.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = graph.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #11
 def _load(cls, model_name):
     f_h = open('{}.yaml'.format(model_name), 'r')
     persisted_yaml_model = f_h.read()
     f_h.close()
     loaded_model = model_from_yaml(persisted_yaml_model)
     # load weights into retrieved model instance
     loaded_model.load_weights("{}.h5".format(model_name))
     return loaded_model
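A minimal save-side counterpart, assuming the same naming convention of a {model_name}.yaml architecture file plus a {model_name}.h5 weight file; the method name _save is hypothetical:

 def _save(cls, model, model_name):
     # persist the architecture as YAML and the weights as HDF5,
     # matching what _load above reads back
     with open('{}.yaml'.format(model_name), 'w') as f_h:
         f_h.write(model.to_yaml())
     model.save_weights('{}.h5'.format(model_name), overwrite=True)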
Example #12
    def train(self, data_iterator):
        feature_iterator, label_iterator = tee(data_iterator,2)
        X_train = np.asarray([x for x,y in feature_iterator])
        y_train = np.asarray([y for x,y in label_iterator])

        model = model_from_yaml(self.yaml)
        model.set_weights(self.parameters.value)
        model.fit(X_train, y_train, show_accuracy=True, **self.train_config)
        yield model.get_weights()
Example #13
    def predict(self, X_test, Y_test):

        model = model_from_json(open('model_architecture.json').read())
        # the YAML load below overwrites the JSON-loaded model; either one alone would do
        model = model_from_yaml(open('model_architecture.yaml').read())
        model.load_weights('weights.h5')
        loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=32)

        classes = model.predict_classes(X_test, batch_size=32)

        proba = model.predict_proba(X_test, batch_size=32)
Example #14
def load_model(hypes):
    """Load a serialized model."""
    yaml_path = hypes["segmenter"]["serialized_model_path"] + ".yaml"
    hdf5_path = hypes["segmenter"]["serialized_model_path"] + ".hdf5"
    with open(yaml_path) as f:
        yaml_string = f.read()
    model = model_from_yaml(yaml_string)
    model.load_weights(hdf5_path)
    model.compile(optimizer='adadelta', loss='categorical_crossentropy')
    return model
Example #15
    def train(self, partition_id, data_iterator):
        '''
        Train a keras model on a worker and send asynchronous updates
        to parameter server
        '''
        import time
        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        if x_train.size == 0:
            print("Empty Partition!")
            return

        model = model_from_yaml(self.yaml)
        model.compile(loss=self.keras_loss, optimizer=self.keras_optimizer, metrics=['accuracy'])
    
        nb_epoch = self.train_config['nb_epoch']
        batch_size = self.train_config.get('batch_size')
        nb_train_sample = x_train.shape[0]
        nb_batch = int(np.ceil(nb_train_sample/float(batch_size)))
        index_array = np.arange(nb_train_sample)
        batches = [(i*batch_size, min(nb_train_sample, (i+1)*batch_size)) for i in range(0, nb_batch)]
        if self.frequency == 'epoch':
            for epoch in range(nb_epoch):
                print('-'*40)
                print('Partition %d/%d: Epoch %d' %(partition_id+1,self.num_workers,epoch))
                print('-'*40)
                weights_before_training = get_server_weights(self.master_url)
                model.set_weights(weights_before_training)
                self.train_config['nb_epoch'] = 1
                if x_train.shape[0] > batch_size:
                    model.fit(x_train, y_train, **self.train_config)
                weights_after_training = model.get_weights()
                deltas = subtract_params(weights_before_training, weights_after_training)
                put_deltas_to_server(deltas, self.master_url)
        elif self.frequency == 'batch':
            for epoch in range(nb_epoch):
                print('-'*40)
                print('Partition %d/%d: Epoch %d' %(partition_id+1,self.num_workers,epoch))
                print('-'*40)
                if x_train.shape[0] > batch_size:
                    for (batch_start, batch_end) in batches:
                        weights_before_training = get_server_weights(self.master_url)
                        model.set_weights(weights_before_training)
                        batch_ids = index_array[batch_start:batch_end]
                        X = slice_X(x_train, batch_ids)
                        y = slice_X(y_train, batch_ids)
                        model.train_on_batch(X, y)
                        weights_after_training = model.get_weights()
                        deltas = subtract_params(weights_before_training, weights_after_training)
                        put_deltas_to_server(deltas, self.master_url)
        else:
            print('Choose frequency to be either batch or epoch')
        yield []
Example #16
def load_keras_bot(bot_name):
    model_file = 'model_zoo/' + bot_name + '_bot.yml'
    weight_file = 'model_zoo/' + bot_name + '_weights.hd5'
    with open(model_file, 'r') as f:
        yml = yaml.load(f)
        model = model_from_yaml(yaml.dump(yml))
        # Note that in Keras 1.0 we have to recompile the model explicitly
        model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
        model.load_weights(weight_file)
    processor = SevenPlaneProcessor()
    return KerasBot(model=model, processor=processor)
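The yaml.load / yaml.dump round trip above simply re-serializes the parsed document back into a string, so on the save side writing model.to_yaml() directly is sufficient. A hedged sketch matching the file-name convention above (the function name save_keras_bot is hypothetical):

def save_keras_bot(bot_name, model):
    # persist architecture and weights in the layout load_keras_bot expects
    with open('model_zoo/' + bot_name + '_bot.yml', 'w') as f:
        f.write(model.to_yaml())
    model.save_weights('model_zoo/' + bot_name + '_weights.hd5', overwrite=True)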
Example #17
def load_model(weight_path, structure_path=''):
    """
    load the keras model, from your saved model

    :return: uncompiled model
    """
    if structure_path == '':
        structure_path = weight_path + ".yaml"
    model = model_from_yaml(open(structure_path).read())
    model.load_weights(weight_path)
    return model
Example #18
def loadModel(MODEL_NAME):

    print('Loading Model..')

    model = model_from_yaml(open(MODEL_NAME+'.yaml').read())
    model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
    model.load_weights(MODEL_NAME+'.h5')
    model.summary()

    print('Done')

    return model
Example #19
    def train(self, data_iterator):

        feature_iterator, label_iterator = tee(data_iterator,2)
        X_train = np.asarray([x for x,y in feature_iterator])
        y_train = np.asarray([y for x,y in label_iterator])

        model = model_from_yaml(self.yaml)
        model.set_weights(self.parameters.value)
        # Only start training if there's more than one batch of data
        if X_train.shape[0] > self.train_config.get('batch_size'):
            model.fit(X_train, y_train, show_accuracy=True, **self.train_config)
        yield model.get_weights()
Example #20
def load_keras_model(weights, yaml,
                     normalise_conv_for_one_hot_encoded_input=False,
                     name_of_conv_layer_to_normalise=None): 
    #At the time of writing, I don't actually use this because
    #I do the conversion in convert_sequential_model to the deeplift_layer
    from keras.models import model_from_yaml                                    
    model = model_from_yaml(open(yaml).read()) 
    model.load_weights(weights) 
    if (normalise_conv_for_one_hot_encoded_input):
        mean_normalise_first_conv_layer_weights(
         model,
         name_of_conv_layer_to_normalise=name_of_conv_layer_to_normalise)
    return model 
Example #21
    def train(self, data_iterator):

        feature_iterator, label_iterator = tee(data_iterator,2)
        X_train = np.asarray([x for x,y in feature_iterator])
        y_train = np.asarray([y for x,y in label_iterator])

        model = model_from_yaml(self.yaml)
        model.set_weights(self.parameters.value)
        weights_before_training = model.get_weights()
        if X_train.shape[0] > self.train_config.get('batch_size'):
            model.fit(X_train, y_train, show_accuracy=True, **self.train_config)
        weights_after_training = model.get_weights()
        deltas = subtract_params(weights_before_training, weights_after_training)
        yield deltas
Example #22
    def test_sequential(self):
        print('Test sequential')
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict(X_test, verbose=0)
        model.predict_classes(X_test, verbose=0)
        model.predict_proba(X_test, verbose=0)
        model.get_config(verbose=0)

        print('test weight saving')
        fname = 'test_sequential_temp.h5'
        model.save_weights(fname, overwrite=True)
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate(X_train, y_train, verbose=0)
        assert(loss == nloss)

        # test json serialization
        json_data = model.to_json()
        model = model_from_json(json_data)

        # test yaml serialization
        yaml_data = model.to_yaml()
        model = model_from_yaml(yaml_data)
Example #23
    def train(self, data_iterator):
        '''
        Train a keras model on a worker and send asynchronous updates
        to parameter server
        '''
        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        if x_train.size == 0:
            return

        model = model_from_yaml(self.yaml, self.custom_objects)
        model.compile(optimizer=self.master_optimizer, loss=self.master_loss, metrics=self.master_metrics)

        nb_epoch = self.train_config['nb_epoch']
        batch_size = self.train_config.get('batch_size')
        nb_train_sample = x_train.shape[0]
        nb_batch = int(np.ceil(nb_train_sample/float(batch_size)))
        index_array = np.arange(nb_train_sample)
        batches = [(i*batch_size, min(nb_train_sample, (i+1)*batch_size)) for i in range(0, nb_batch)]

        if self.frequency == 'epoch':
            for epoch in range(nb_epoch):
                weights_before_training = get_server_weights(self.master_url)
                model.set_weights(weights_before_training)
                self.train_config['nb_epoch'] = 1
                if x_train.shape[0] > batch_size:
                    model.fit(x_train, y_train, **self.train_config)
                weights_after_training = model.get_weights()
                deltas = subtract_params(weights_before_training, weights_after_training)
                put_deltas_to_server(deltas, self.master_url)
        elif self.frequency == 'batch':
            from keras.engine.training import slice_X
            for epoch in range(nb_epoch):
                if x_train.shape[0] > batch_size:
                    for (batch_start, batch_end) in batches:
                        weights_before_training = get_server_weights(self.master_url)
                        model.set_weights(weights_before_training)
                        batch_ids = index_array[batch_start:batch_end]
                        X = slice_X(x_train, batch_ids)
                        y = slice_X(y_train, batch_ids)
                        model.train_on_batch(X, y)
                        weights_after_training = model.get_weights()
                        deltas = subtract_params(weights_before_training, weights_after_training)
                        put_deltas_to_server(deltas, self.master_url)
        else:
            print('Choose frequency to be either batch or epoch')
        yield []
Example #24
def get_trained_model():
    '''
    reconstruct trained model from saved files

    :rtype: a tuple of the model constructed and a yaml string of parameters
    used
    '''
    folder = settings.MODEL_DIR
    archi = folder + settings.MODEL_ARCHITECTURE
    weights = folder + settings.MODEL_WEIGHTS
    params = folder + settings.MODEL_PARAMS
    params = yaml.safe_load(open(params).read())
    model = model_from_yaml(open(archi).read())
    model.load_weights(weights)
    model.compile(loss='mse', optimizer='rmsprop')
    return model, params
Example #25
    def load(cls, basedir):
        cnn = cls()
        with open(path.join(basedir, 'model.yml'), 'r') as model_file:
            cnn.__network = model_from_yaml(
                model_file.read(),
                custom_objects={'_OneMaxPooling': _OneMaxPooling}
            )

        cnn.__network.load_weights(path.join(basedir, 'weights.h5'))

        with open(path.join(basedir, 'index.json'), 'r') as index_file:
            cnn.__index = json.load(index_file)
            cnn.__padding_index = len(cnn.__index)
            cnn.__classes = cnn.__network.outputs['output'].output_dim

        return cnn
Example #26
 def train(self, data_iterator):
     '''
     Train a keras model on a worker
     '''
     feature_iterator, label_iterator = tee(data_iterator, 2)
     x_train = np.asarray([x for x, y in feature_iterator])
     y_train = np.asarray([y for x, y in label_iterator])
     model = model_from_yaml(self.yaml)
     model.set_weights(self.parameters.value)
     model.compile(loss=self.keras_loss, optimizer=self.keras_optimizer)
     weights_before_training = model.get_weights()
     if x_train.shape[0] > self.train_config.get('batch_size'):
         model.fit(x_train, y_train, show_accuracy=True, **self.train_config)
     weights_after_training = model.get_weights()
     deltas = subtract_params(weights_before_training, weights_after_training)
     yield deltas
Example #27
    def minimize(self, model, data, max_evals):
        trials_list = self.compute_trials(model, data, max_evals)

        best_val = 1e7
        for trials in trials_list:
            for trial in trials:
                val = trial.get('result').get('loss')
                if val < best_val:
                    best_val = val
                    best_model_yaml = trial.get('result').get('model')
                    best_model_weights = trial.get('result').get('weights')

        best_model = model_from_yaml(best_model_yaml)
        best_model.set_weights(pickle.loads(best_model_weights))

        return best_model
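minimize assumes each trial's result dict carries 'loss', 'model' (a YAML architecture string) and 'weights' (pickled weight arrays). A minimal sketch of an objective that would produce such a result; build_model, x_train and y_train are hypothetical:

import pickle

def objective(params):
    model = build_model(params)  # hypothetical: returns a compiled Keras model
    history = model.fit(x_train, y_train, verbose=0)
    return {'loss': history.history['loss'][-1],
            'status': 'ok',
            'model': model.to_yaml(),
            'weights': pickle.dumps(model.get_weights())}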
Example #28
    def __init__(self, architecture_file, weights_file, output_layer):
        """
        Initialize model
        """

        # Deferred import
        from keras.models import model_from_yaml
        from keras import backend as K

        self.model = model_from_yaml(open(architecture_file).read())
        self.model.load_weights(weights_file)

        # Output function
        self.predict = K.function(
            [self.model.layers[0].input, K.learning_phase()],
            self.model.layers[output_layer].output)
Example #29
    def __init__(self, architecture_file, weights_file, output_layer):
        """
        Initialize model
        """

        # Deferred import
        from keras.models import model_from_yaml
        import theano

        self.model = model_from_yaml(open(architecture_file).read())
        self.model.load_weights(weights_file)

        # Output function
        self.predict = theano.function([self.model.layers[0].input],
                                       self.model.layers[output_layer].get_output(train=False),
                                       allow_input_downcast=True)
Example #30
 def best_models(self, nb_models, model, data, max_evals):
     trials_list = self.compute_trials(model, data, max_evals)
     num_trials = sum(len(trials) for trials in trials_list)
     if num_trials < nb_models:
         nb_models = num_trials  # cap at the number of available trials
     scores = []
     for trials in trials_list:
         scores = scores + [trial.get('result').get('loss') for trial in trials]
     cut_off = sorted(scores, reverse=True)[nb_models-1]
     model_list = []
     for trials in trials_list:
         for trial in trials:
             if trial.get('result').get('loss') >= cut_off:
                 model = model_from_yaml(trial.get('result').get('model'))
                 model.set_weights(pickle.loads(trial.get('result').get('weights')))
                 model_list.append(model)
     return model_list
Example #31
def get_model(args):
    from keras.utils.generic_utils import get_custom_objects
    custom_objects = {
        "recall": recall,
        "sensitivity": recall,
        "specificity": specificity,
        "fpr": fpr,
        "fnr": fnr,
        "precision": precision,
        "f1": f1,
        "ambig_binary_crossentropy": ambig_binary_crossentropy,
        "ambig_mean_squared_error": ambig_mean_squared_error,
        "MultichannelMultinomialNLL": MultichannelMultinomialNLL
    }
    get_custom_objects().update(custom_objects)
    if args.yaml is not None:
        from keras.models import model_from_yaml
        #load the model architecture from yaml
        yaml_string = open(args.yaml, 'r').read()
        model = model_from_yaml(yaml_string)
        #load the model weights
        model.load_weights(args.weights)

    elif args.json is not None:
        from keras.models import model_from_json
        #load the model architecture from json
        json_string = open(args.json, 'r').read()
        model = model_from_json(json_string)
        model.load_weights(args.weights)

    elif args.model_hdf5 is not None:
        #load from the hdf5
        from keras.models import load_model
        model = load_model(args.model_hdf5)
    print("got model architecture")
    print("loaded model weights")
    print(model.summary())

    return model
Example #32
def read_my_model(model_file_name="model.json", weights_file_name=None):
    # Load model
    print("Loading model from", model_file_name, "...")
    # json
    if model_file_name.split('.')[-1] == 'json':
        with open(model_file_name, 'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = model_from_json(loaded_model_json)
    # yaml
    elif model_file_name.split('.')[-1] == 'yaml' or model_file_name.split('.')[-1] == 'yml':
        with open(model_file_name, 'r') as yaml_file:
            loaded_model_yaml = yaml_file.read()
        loaded_model = model_from_yaml(loaded_model_yaml)
    else:
        print("file_type can only be 'json' or 'yaml'")
    # Load weights
    if weights_file_name is not None:
        print("Loading weights from", weights_file_name, "...")
        loaded_model.load_weights(weights_file_name)
    # Return
    print("Done loading model.")
    return loaded_model
Example #33
def lstm_predict(string):
    #print 'loading model......'
    with open('lstm_data/lstm.yml', 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    #print 'loading weights......'
    model.load_weights('./model/fip/lstm.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    data = input_transform(string)
    data = data.reshape(1, -1)  # ndarray.reshape returns a new array; assign it back
    #print data
    result = model.predict_classes(data)
    #print result
    if result[0] == 1:
        print(string, ' positive')
    elif result[0] == 0:
        print(string, ' negative')
    elif result[0] == 2:
        print(string, ' neutral')
Example #34
    def __init__(self, corpus_path, model_path, weights_path):
        with open(corpus_path, encoding="utf-8") as corpus_file:
            self.corpus = corpus_file.read()[155000:220000]

        # Get a unique identifier for each char in the corpus,
        # then make some dicts to ease encoding and decoding
        self.chars = sorted(list(set(self.corpus)))
        self.encoding = {c: i for i, c in enumerate(self.chars)}
        self.decoding = {i: c for i, c in enumerate(self.chars)}

        # Some fields we'll need later
        self.num_chars = len(self.chars)
        self.sentence_length = 50
        self.corpus_length = len(self.corpus)

        # Build our network from loaded architecture and weights
        with open(model_path) as model_file:
            architecture = model_file.read()

        self.model = model_from_yaml(architecture)
        self.model.load_weights(weights_path)
        self.model.compile(loss='categorical_crossentropy', optimizer='adam')
Example #35
 def post(self, request, *args, **kwargs):
     features = FeatureSerializer(data=request.data, required=False)
     first = time.time()
     if features.is_valid():
         attr = list()
         for k, v in features.data.items():
             attr.append([v])
         print(attr)
         #l = list()
         #l.append(attr)
         #for i in range(27):
         #    l.append([])
         #load YAML and create model
         yaml_file = open('api/model.yaml', 'r')
         loaded_model_yaml = yaml_file.read()
         yaml_file.close()
         loaded_model = model_from_yaml(loaded_model_yaml)
         # load weights into new model
         loaded_model.load_weights("api/model.h5")
         print("Loaded model from disk")
         import numpy as np
         X = np.asarray(attr)
         sh = X.shape
         # evaluate loaded model on test data
         loaded_model.compile(loss='binary_crossentropy',
                              optimizer='rmsprop',
                              metrics=['accuracy'])
         #if (X.ndim == 1):
         #   X = np.array([X])
         print(X)
         print(X.T)
         y = loaded_model.predict(X.T)
         #print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))
         print(time.time() - first)
         return Response(y, status=status.HTTP_201_CREATED)
     else:
         return Response(features.errors,
                         status=status.HTTP_400_BAD_REQUEST)
Example #36
def load_keras_model(weights,
                     yaml=None,
                     json=None,
                     normalise_conv_for_one_hot_encoded_input=False,
                     normalise_across_rows=True,
                     name_of_conv_layer_to_normalise=None):
    assert yaml is not None or json is not None,\
     "either yaml or json must be specified"
    assert yaml is None or json is None,\
     "only one of yaml or json must be specified"
    if (yaml is not None):
        from keras.models import model_from_yaml
        model = model_from_yaml(open(yaml).read())
    else:
        from keras.models import model_from_json
        model = model_from_json(open(json).read())
    model.load_weights(weights)
    if (normalise_conv_for_one_hot_encoded_input):
        mean_normalise_first_conv_layer_weights(
            model,
            normalise_across_rows=normalise_across_rows,
            name_of_conv_layer_to_normalise=name_of_conv_layer_to_normalise)
    return model
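A hedged usage sketch; the file names are placeholders:

model = load_keras_model('weights.h5', yaml='architecture.yaml')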
Example #37
def load_keras_model(h5_file,
                     json_file=None,
                     yaml_file=None,
                     is_weights=False,
                     from_json=True):
    """
    Utility to load the whole model
    """
    # third-party imports
    from keras.models import load_model, model_from_json, model_from_yaml

    if is_weights:
        if from_json:
            json_string = open(json_file, "r").read()
            model = model_from_json(json_string)
        else:
            yaml_string = open(yaml_file, "r").read()
            model = model_from_yaml(yaml_string)
        model.load_weights(h5_file)
    else:
        model = load_model(h5_file)

    return model
Example #38
def lstm_predict_batch(data):
    result = []
    tokenwords = tokenizer(data)
    model = word2vec.Word2Vec.load('data/model/word2vec/word2vec')
    for line in tokenwords:
        tmp_line = create_dictionaries(model, line)
        if tmp_line is not None and tmp_line.shape == (30, 200):
            result.append(tmp_line)
    result = array(result)

    with open('data/model/lstm/lstm_koubei.yml', 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    #    print 'loading weights......'
    model.load_weights('data/model/lstm/lstm_koubei.h5')
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    result = model.predict_classes(result)
    result = map(lambda x: 'positive ' if x[0] == 1 else ' negative', result)
    return result
Example #39
def lstm_predict(data):
    print('Loading Model...')
    with open('./model/lstm.yaml', 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    print('Loading Weights...')
    model.load_weights('./model/lstm.h5')
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    data = data.reshape(-1, 1)
    # print data

    result = model.predict_classes(data)
    # print result

    if result[0] == 1:
        print('positive')
    elif result[0] == 0:
        print('neutral')
    else:
        print('negative')
Example #40
def pred_data():

    with open('./models/cat_dog.yaml') as yamlfile:
        loaded_model_yaml = yamlfile.read()
    model = model_from_yaml(loaded_model_yaml)
    model.load_weights('./models/cat_dog.h5')

    adam = Adam(lr=0.0003)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    images = []
    path = './data/test/'
    for f in os.listdir(path):
        img = image.load_img(path + f, target_size=image_size)
        img_array = image.img_to_array(img)

        x = np.expand_dims(img_array, axis=0)
        x = preprocess_input(x)
        result = model.predict_classes(x, verbose=0)

        print(f, result[0])
Example #41
    def train(self, data_iterator):
        """Train a keras model on a worker
        """
        optimizer = get_optimizer(self.master_optimizer)
        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=optimizer,
                           loss=self.master_loss,
                           metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        # self.model.compile(optimizer=self.master_optimizer,
        #                    loss=self.master_loss, metrics=self.master_metrics)
        weights_before_training = self.model.get_weights()
        if x_train.shape[0] > self.train_config.get('batch_size'):
            self.model.fit(x_train, y_train, **self.train_config)
        weights_after_training = self.model.get_weights()
        deltas = subtract_params(weights_before_training,
                                 weights_after_training)
        yield deltas, weights_after_training
Example #42
    def from_disk(cls, architecture, weights):
        """Load pre-trained sequence embedding from disk

        Parameters
        ----------
        architecture : str
            Path to architecture file (e.g. created by `to_disk` method)
        weights : str
            Path to pre-trained weight file (e.g. created by `to_disk` method)

        Returns
        -------
        sequence_embedding : SequenceEmbedding
            Pre-trained sequence embedding model.
        """
        self = SequenceEmbedding()

        with open(architecture, 'r') as fp:
            yaml_string = fp.read()
        self.embedding_ = model_from_yaml(yaml_string,
                                          custom_objects=CUSTOM_OBJECTS)
        self.embedding_.load_weights(weights)
        return self
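The docstring refers to a to_disk method; a minimal sketch of what it presumably writes, assuming the embedding_ attribute used above:

    def to_disk(self, architecture, weights):
        # write the architecture as YAML and the weights as HDF5,
        # in the format from_disk expects to read back
        with open(architecture, 'w') as fp:
            fp.write(self.embedding_.to_yaml())
        self.embedding_.save_weights(weights, overwrite=True)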
Example #43
def lstm_predict(inputList):
	print('    Loading model -> DeepLearning_LSTM')
	with open('../docs/lstm_data/lstm.yml', 'r') as f:
		yaml_string = yaml.load(f)
	model = model_from_yaml(yaml_string)

	print('    Loading weights -> DeepLearning_LSTM')
	model.load_weights('../docs/lstm_data/lstm.h5')
	model.compile(loss = 'binary_crossentropy',optimizer = 'adam',metrics = ['accuracy'])
	print('    Transforming input data ...')
	transList = input_transform(inputList)
	print('    Classifying ( DeepLearning_LSTM) ...')
	scoreList = []
	for data in transList:
		data = data.reshape(1, -1)
		result = model.predict_classes(data, verbose=0)
		prob = model.predict_proba(data, verbose=0)
		if result[0][0] == 1:
			score = prob[0][0]
		else:
			score = prob[0][0]*-1
		scoreList.append(score)
	return scoreList
Example #44
def get_model(string):
    k.clear_session()
    print('loading model yaml')
    with open(syspath+'/model_picture/Xlnet-BiLSTM-CNN-3-Attention.yml','r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string,custom_objects={'AttentionLayer':AttentionLayer})
    print('loading model weights')
    model.load_weights(syspath+'/model_picture/XLnet-BiLSTM-CNN-3-Attention.h5')
    model.compile(loss=losses.categorical_crossentropy, optimizer=optimizers.Adam(lr=learning_rate),
                          metrics=['accuracy'])
    try:
        result = model.predict(string)
        result = np.array(result)
        print(result)
        x = np.argmax(result)
        print(x)
        del result
    except Exception as e:
        print(e)
        x = None

    del model
    gc.collect()
    return x
Example #45
    def _fit(self, df):
        '''
        Private fit method of the Estimator, which trains the model.
        '''
        simple_rdd = df_to_simple_rdd(
            df,
            categorical=self.get_categorical_labels(),
            nb_classes=self.get_nb_classes(),
            featuresCol=self.getFeaturesCol(),
            labelCol=self.getLabelCol())
        simple_rdd = simple_rdd.repartition(self.get_num_workers())
        optimizer = None
        if self.get_optimizer_config() is not None:
            optimizer = get(self.get_optimizer_config()['name'],
                            self.get_optimizer_config())

        keras_model = model_from_yaml(self.get_keras_model_config())

        spark_model = SparkModel(simple_rdd.ctx,
                                 keras_model,
                                 optimizer=optimizer,
                                 mode=self.get_mode(),
                                 frequency=self.get_frequency(),
                                 num_workers=self.get_num_workers())
        spark_model.train(simple_rdd,
                          nb_epoch=self.get_nb_epoch(),
                          batch_size=self.get_batch_size(),
                          verbose=self.get_verbosity(),
                          validation_split=self.get_validation_split())

        model_weights = spark_model.master_network.get_weights()
        weights = simple_rdd.ctx.broadcast(model_weights)
        return ElephasTransformer(
            labelCol=self.getLabelCol(),
            outputCol='prediction',  # TODO: Set default value
            keras_model_config=spark_model.master_network.to_yaml(),
            weights=weights)
Example #46
    def __init__(self):

        self.label = ['pos', 'mid', 'neg']
        self.maxlen = 50

        print('Loading the trained model...')
        # load the LSTM network architecture
        with open(os.path.join(config.BASH_PATH, 'lstm_data', 'lstm.yml'),
                  'r') as f:
            modelStr = yaml.load(f)
        self.model = model_from_yaml(modelStr)

        print("加载训练好的模型参数...")
        # 加载 lstm 模型的权值参数
        self.model.load_weights(
            os.path.join(config.BASH_PATH, 'lstm_data', 'lstm.h5'))
        self.model.compile(loss='binary_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])

        # load the negative words from the lexicon
        self.negWord = []
        negFile = open(os.path.join(config.BASH_PATH, 'data', 'negWord.txt'),
                       encoding='UTF-8')
        for line in negFile.readlines():
            if line.strip():
                self.negWord.append(line.strip())

        # load the positive words from the lexicon
        self.posWord = []
        posFile = open(os.path.join(config.BASH_PATH, 'data', 'posWord.txt'),
                       encoding='UTF-8')
        for line in posFile.readlines():
            if line.strip():
                self.posWord.append(line.strip())

        self.allWords = self.posWord + self.negWord
Example #47
def lstm_predict(string):
    with open('model/lstm.yml', 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)
    model.load_weights('model/lstm.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    data = input_transform(string)
    data = data.reshape(1, -1)
    result = model.predict_classes(data)
    # print result # [[1]]
    if result[0] == 1:
        s = 'positive'
        print(string, ' positive')
        return s

    elif result[0] == 0:
        s = 'neutral'
        print(string, ' neutral')
        return s
    else:
        s = '******'
        return s
Example #48
    def __init__(self, predicted_product):
        model_to_load = "all"
        if predicted_product == "business-card":
            model_to_load = "bc"
            self.max_length = 18
        elif predicted_product == "banners":
            model_to_load = "banners"
        yaml_file = open('../Model/caption_pred_model_{0}.yaml'.format(model_to_load), 'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        self.model = model_from_yaml(loaded_model_yaml)
        self.model.load_weights("../Model/caption_pred_model_{0}.h5".format(model_to_load))
        with open('../Model/corpus_{0}.txt'.format(model_to_load), 'rb') as corpus_file:
            self.corpus = pickle.load(corpus_file)
        self.word_to_position = {}
        self.position_to_word = {}
        position = 1
        for word in self.corpus:
            self.word_to_position[word] = position
            self.position_to_word[position] = word
            position += 1

        inceptionV3_model = InceptionV3(weights='imagenet')
        self.model_inception = Model(inceptionV3_model.input, inceptionV3_model.layers[-2].output)
Example #49
def lstm_predict_list(string):
    print('loading model......')
    with open('../model/lstm.yml', 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    print('loading weights......')
    model.load_weights('../model/lstm.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])

    out = list()
    for text in string:  # avoid shadowing the built-in str
        data = input_transform(text)
        data = data.reshape(1, -1)
        # print(data)
        result = model.predict_classes(data)
        if result[0] == 1:
            out.append('1')
        elif result[0] == 0:
            out.append('0')
        else:
            pass
    print(out)
Example #50
 def load_model(self):
     print('Loading model ...')
     # check existence of params
     assert os.path.exists(self.model_folder), 'model_fold is not found: {}'.format(self.model_folder)
     assert self.weights_fname is not None, 'Argument required: --weights-file'
     checkExistence(self.weights_fname)
     model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
     model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
     checkExistence(model_graph)
     checkExistence(model_train_vars)
     from keras.models import model_from_yaml
     with open(model_graph, 'r') as fgraph:
         self.model = model_from_yaml(fgraph.read())
         self.model.load_weights(self.weights_fname)
     npzfile = np.load(model_train_vars)
     self.maxlen_userUtter = np.int32(npzfile['maxlen_userUtter'][()])
     self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])
     self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])
     self.userIntent_vocab_size = np.int32(
         npzfile['userIntent_vocab_size'][()])
     self.id2userTag = npzfile['id2userTag'][()]
     self.id2word = npzfile['id2word'][()]
     self.id2userIntent = npzfile['id2userIntent'][()]
     self.userTag2id = npzfile['userTag2id'][()]
Example #51
def lstm_predict2(string=None):
    print('loading model......')

    with open('../new_data_lstm4/lstm.yml', 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    print('loading weights......')
    model.load_weights('../new_data_lstm4/lstm.h5')

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    my_num = 0
    my_sum = 0
    for string in sentencs():
        my_sum += 1
        try:
            data = input_transform(string)
            result = model.predict_classes(data)
            if result[0][0] == 1:
                print('not sensitive')
                print(string, ' not sensitive')
                my_sentencs = string
                result = 'not sensitive'

            else:
                my_num += 1
                print('sensitive')
                print(string, ' sensitive')
                my_sentencs = string
                result = 'sensitive'
        except:
            pass
    acc = my_num / my_sum
    print('accuracy is %f' % acc)
Example #52
def load_model(bin_dir):
    ''' Load model from .yaml and the weights from .h5

        Arguments:
            bin_dir: The directory of the bin (normally bin/)

        Returns:
            Loaded model from file
    '''
    global model

    # load YAML and create model
    yaml_file_1 = open('%s/model.yaml' % bin_dir, 'r')
    loaded_model_yaml_1 = yaml_file_1.read()
    yaml_file_1.close()
    model = model_from_yaml(loaded_model_yaml_1)

    # load weights into new model
    model.load_weights('%s/model.h5' % bin_dir)
    #workaround for tensorflow thread error
    global graph1
    graph1 = tf.get_default_graph()

    return model
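The stashed default graph is presumably meant to wrap later predictions made from other threads (the usual TF1/Keras workaround); a hedged usage sketch, where x is a placeholder input array:

with graph1.as_default():
    prediction = model.predict(x)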
Example #53
    def post(self,request):
        global graph
        with graph.as_default():
            base_64_string = request.POST['img-src'].replace(
                'data:image/png;base64,',''
            )
            file = BytesIO(base64.b64decode(base_64_string))

            with open(os.path.join('cnn', 'model.json'), 'r') as f:
                m = f.read()
                # YAML is a near-superset of JSON, so model_from_yaml can also
                # parse this JSON architecture file
                model = model_from_yaml(m)

            model.load_weights(os.path.join('cnn', 'param.hdf5'))
            img = Image.open(file).resize((28,28)).convert('L')
            img = np.asarray(img)
            img_array = img.reshape(1,28, 28,1)/255

            pr = model.predict(img_array)

            context = {
                'result':np.argmax(pr)
            }

            return render(self.request,'cnn/results.html',context)
Example #54
def lstm_predict(string):
    print('loading model......')
    print(os.path.realpath(__file__))
    with open('MultiClass_lstm/LstmMaster/lstm_data/lstm.yml', 'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    print('loading weights......')
    model.load_weights('MultiClass_lstm/LstmMaster/lstm_data/lstm.h5')
    model.compile(loss='binary_crossentropy',
                  optimizer='adam', metrics=['accuracy'])
    data = input_transform(string)
    data = data.reshape(1, -1)


    result = model.predict_classes(data)
    string = jieba.lcut(string)
    print(result)
    if result[0][0] == 1:
        print(string, ' \npositive')
        return "positive"
    else:
        print(string, ' \nnegative')
        return "negative"
Example #55
def predict_pn():

    with open('E:/ML/models/nlp/text_p_n/word2vec_pn_cnn.yaml') as yamlfile:
        loaded_model_yaml = yamlfile.read()

    model = model_from_yaml(loaded_model_yaml)
    model.load_weights('E:/ML/models/nlp/text_p_n/word2vec_pn_cnn.h5')

    adam = Adam(lr=0.0003)
    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])

    pre_input_texts = [
        "很好很满意", "特别好,服务态度很不错,售后很及时", "不好不满意", "质量问题严重", "商家态度很差", "售后很渣,差评"
    ]
    pre_index, pre_texts = train_word2vect(pre_input_texts)

    pre_result = model.predict_classes(pre_texts)
    print(pre_result)
    labels = [int(round(x[0])) for x in pre_result]
    label2word = {1: 'pos', 0: 'neg'}
    for i in range(len(pre_result)):
        print('{0}------{1}'.format(pre_input_texts[i], label2word[labels[i]]))
Example #56
def lstm_predict(string):
    print('loading model......')
    with open(
            'D:\Python/mo_fa_shi_bei/SentimentAnalysis-master/code/model/lstm.yml',
            'r') as f:
        yaml_string = yaml.load(f)
    model = model_from_yaml(yaml_string)

    print('loading weights......')
    model.load_weights(
        'D:\Python/mo_fa_shi_bei/SentimentAnalysis-master/code/model/lstm.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    data = input_transform(string)
    data = data.reshape(1, -1)
    result = model.predict_classes(data)
    if result[0] == 1:
        print('positive')  # 1
    elif result[0] == 0:
        print('neutral')  # 0
    else:  # -1
        print('negative')  # 2
    return result[0]
Example #57
    def load(self):
        log.info("loading project %s ..." % self.path)

        if not self.exists():
            return "%s does not exist" % self.path

        err = self.logic.load()
        if err is not None:
            return err

        if os.path.exists(self.weights_path):
            log.debug("loading model from %s ...", self.weights_path)
            self.model = load_model(self.weights_path)
            # https://github.com/keras-team/keras/issues/6462
            self.model._make_predict_function()

        elif os.path.exists(self.model_path):
            log.debug("loading model from %s ...", self.model_path)
            with open(self.model_path, 'r') as fp:
                self.model = model_from_yaml(fp.read())

        else:
            self.model = self.logic.builder(True)

        if os.path.exists(self.history_path):
            log.debug("loading history from %s ...", self.history_path)
            with open(self.history_path, 'r') as fp:
                self.history = json.load(fp)


        if os.path.exists(self.classes_path):
            log.debug("loading classes from %s ...", self.classes_path)
            with open(self.classes_path, 'r') as fp:
                self.classes = {int(k) : v for k, v in json.load(fp).items()}

        return None
Example #58
    def load_model(self):
        """Load the Recurrent Neural Network model from disk.

        Args:
            None

        Returns:
            _model: RNN model

        """
        # Load yaml and create model
        print('> Loading model from disk')
        with open(self._path_model_parameters, 'r') as yaml_file:
            loaded_model_yaml = yaml_file.read()
        _model = model_from_yaml(loaded_model_yaml)

        # Load weights into new model
        _model.load_weights(self._path_model_weights, by_name=True)
        '''sys.exit(0)
        _model.load_weights(self._path_checkpoint)'''
        print('> Finished loading model from disk')

        # Return
        return _model
Example #59
def lstm_predict(string):
    print('loading model......')
    with open('../model/lstm.yml', 'r') as f:
        yaml_string = yaml.load(f)
    # print("yaml_string:",yaml_string)
    model = model_from_yaml(yaml_string)

    # print 'loading weights......'
    model.load_weights('../model/lstm.h5')
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print("string:", string)
    data = input_transform(string)
    data = data.reshape(1, -1)
    ## print data
    result = model.predict_classes(data)

    print("result:", result)
    # print result # [[1]]
    if result[0] == 1:
        print("Prediction: " + string + ' is normal')
    else:
        print("Prediction: " + string + ' is garbled')
Example #60
def load_model(model_config_path, model_weights_path=None):
    """Load a saved model.

    Parameters
    ----------
    model_config_path: str
      The path to the model configuration yaml file. We have provided
      you this file for problems 2 and 3.
    model_weights_path: str, optional
      If specified, will load keras weights from hdf5 file.

    Returns
    -------
    keras.models.Model
    """
    with open(model_config_path, 'r') as f:
        model = model_from_yaml(f.read())

    if model_weights_path is not None:
        model.load_weights(model_weights_path)

    model.summary()

    return model
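A hedged usage example; the paths are placeholders:

model = load_model('model_config.yaml', model_weights_path='model_weights.hdf5')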