def instantiateModel(hyperparams):

    # We'll copy the same model from above
    def custom_model(X, y):
        X = learn.ops.batch_normalize(X, scale_after_normalization=True)

        layers = learn.ops.dnn(X,
                               hyperparams['HIDDEN_UNITS'],
                               activation=hyperparams['ACTIVATION_FUNCTION'],
                               dropout=hyperparams['KEEP_PROB'])

        return learn.models.logistic_regression(layers, y)

    classifier = learn.TensorFlowEstimator(
        model_fn=custom_model,
        n_classes=y_classes,
        batch_size=hyperparams['BATCH_SIZE'],
        steps=hyperparams['STEPS'],
        optimizer=hyperparams['OPTIMIZER'],
        learning_rate=hyperparams['LEARNING_RATE'])

    # We'll make a monitor so that we can implement early stopping based on
    # our train accuracy. This helps prevent overfitting.
    monitor = learn.monitors.BaseMonitor(
        early_stopping_rounds=int(hyperparams['MAX_BAD_COUNT']),
        print_steps=100)

    return classifier, monitor
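
# A minimal usage sketch. The hyperparameter values below are illustrative
# assumptions (as are X_train/y_train), not settings from the original.
hyperparams = {
    'HIDDEN_UNITS': [10, 20, 10],
    'ACTIVATION_FUNCTION': tf.nn.relu,
    'KEEP_PROB': 0.5,
    'BATCH_SIZE': 32,
    'STEPS': 500,
    'OPTIMIZER': 'Adam',
    'LEARNING_RATE': 0.01,
    'MAX_BAD_COUNT': 250,
}
classifier, monitor = instantiateModel(hyperparams)
classifier.fit(X_train, y_train, monitor)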
# The opening of this snippet was cut off; the header and first convolution
# layer below are reconstructed from skflow's standard text-classification
# example (n_words, EMBEDDING_SIZE, N_FILTERS, FILTER_SHAPE1, POOLING_WINDOW
# and POOLING_STRIDE are assumed defined above the cut).
def cnn_model(X, y):
    """2-layer convolutional network mapping a sequence of words to a class."""
    # Convert word indexes into embeddings of shape
    # [batch_size, sequence_length, EMBEDDING_SIZE].
    word_vectors = skflow.ops.categorical_variable(
        X, n_classes=n_words, embedding_size=EMBEDDING_SIZE, name='words')
    word_vectors = tf.expand_dims(word_vectors, 3)
    with tf.variable_scope('CNN_Layer1'):
        # First level of convolution filtering, followed by a ReLU.
        conv1 = tf.nn.relu(skflow.ops.conv2d(word_vectors, N_FILTERS,
                                             FILTER_SHAPE1, padding='VALID'))
        # Max pooling across the output of convolution + ReLU.
        pool1 = tf.nn.max_pool(conv1,
                               ksize=[1, POOLING_WINDOW, 1, 1],
                               strides=[1, POOLING_STRIDE, 1, 1],
                               padding='SAME')
        # Transpose matrix so that n_filters from convolution becomes width.
        pool1 = tf.transpose(pool1, [0, 1, 3, 2])
    with tf.variable_scope('CNN_Layer2'):
        # Second level of convolution filtering.
        conv2 = skflow.ops.conv2d(pool1,
                                  N_FILTERS,
                                  FILTER_SHAPE2,
                                  padding='VALID')
        # Max across each filter to get useful features for classification.
        pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
    # Apply regular WX + B and classification.
    return skflow.models.logistic_regression(pool2, y)


classifier = skflow.TensorFlowEstimator(model_fn=cnn_model,
                                        n_classes=15,
                                        steps=100,
                                        optimizer='Adam',
                                        learning_rate=0.01,
                                        continue_training=True)

# Continuously train (100 steps per fit call) and evaluate on the test set.
while True:
    classifier.fit(X_train, y_train, logdir='/tmp/tf_examples/word_cnn')
    score = metrics.accuracy_score(y_test, classifier.predict(X_test))
    print('Accuracy: {0:f}'.format(score))
# The snippet was cut off above this line; the training inputs are
# reconstructed here as random sorted points (matching the [-100, 100]
# test range used below).
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T

# Fit regression DNN models.
regressors = []
options = [[2], [10, 10], [20, 20]]
for hidden_units in options:

    def tanh_dnn(X, y):
        features = skflow.ops.dnn(X,
                                  hidden_units=hidden_units,
                                  activation=skflow.tf.tanh)
        return skflow.models.linear_regression(features, y)

    regressor = skflow.TensorFlowEstimator(model_fn=tanh_dnn,
                                           n_classes=0,
                                           steps=500,
                                           learning_rate=0.1,
                                           batch_size=100)
    regressor.fit(X, y)
    score = mean_squared_error(regressor.predict(X), y)
    print("Mean Squared Error for {0}: {1:f}".format(str(hidden_units), score))
    regressors.append(regressor)

# Predict on new random Xs.
# Predict on new inputs spanning [-100, 100).
X_test = np.arange(-100.0, 100.0, 0.1)[:, np.newaxis]
y_1 = regressors[0].predict(X_test)
y_2 = regressors[1].predict(X_test)
y_3 = regressors[2].predict(X_test)

# Plot the results
plt.figure()
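
# A hedged sketch of the plotting that was truncated here: overlay each
# regressor's predictions on the training data (matplotlib's plt assumed
# imported above the cut).
plt.plot(X, y[:, 0], 'o', label='sin (data)')
plt.plot(X, y[:, 1], 'o', label='cos (data)')
for i, y_pred in enumerate([y_1, y_2, y_3]):
    plt.plot(X_test, y_pred[:, 0], label='hidden units %s' % str(options[i]))
plt.legend()
plt.show()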
Example #4
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    X, y, test_size=0.2, random_state=42)

# Split X_train again to create validation data

X_train, X_val, y_train, y_val = cross_validation.train_test_split(
    X_train, y_train, test_size=0.2, random_state=42)

# TensorFlow model using Scikit Flow ops


def conv_model(X, y):
    X = tf.expand_dims(X, 3)
    features = tf.reduce_max(skflow.ops.conv2d(X, 12, [3, 3]), [1, 2])
    features = tf.reshape(features, [-1, 12])
    return skflow.models.logistic_regression(features, y)


val_monitor = monitors.ValidationMonitor(X_val,
                                         y_val,
                                         n_classes=10,
                                         print_steps=50)
# Create a classifier, train and predict.
classifier = skflow.TensorFlowEstimator(model_fn=conv_model,
                                        n_classes=10,
                                        steps=1000,
                                        learning_rate=0.05,
                                        batch_size=128)
classifier.fit(X_train, y_train, val_monitor)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Test Accuracy: {0:f}'.format(score))
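
# Note (a sketch, not in the original snippet): ValidationMonitor also
# supports early stopping, as the later examples use, e.g.:
#   val_monitor = monitors.ValidationMonitor(X_val, y_val, n_classes=10,
#                                            print_steps=50,
#                                            early_stopping_rounds=200)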
Example #5
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from sklearn import datasets, metrics, cross_validation
from tensorflow.contrib import skflow

iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=42)


def my_model(X, y):
    """This is DNN with 10, 20, 10 hidden layers, and dropout of 0.9 probability."""
    layers = skflow.ops.dnn(X, [10, 20, 10], keep_prob=0.9)
    return skflow.models.logistic_regression(layers, y)


classifier = skflow.TensorFlowEstimator(model_fn=my_model,
                                        n_classes=3,
                                        steps=1000)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
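
# A follow-up sketch: the trained estimator can be persisted and restored,
# mirroring the save()/restore() calls in the later examples. The path is an
# illustrative assumption.
classifier.save('/tmp/skflow_iris')
classifier = skflow.TensorFlowEstimator.restore('/tmp/skflow_iris')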
Example #6
# Imports needed by this snippet (not shown in the original extract).
import itertools
import math
import os

import numpy as np
import tensorflow as tf
from tensorflow.contrib import skflow


def tokenizer(iterator):
    for value in iterator:
        ss = []
        for v in value:
            ss.append(v)
        yield ss

def training_data(path):
    X = []
    fp = open(path, "r")
    for line in fp.readlines():
        line = line.strip()
        if not line:
            continue
        # Note: the string is split into individual characters here.
        origin = list(line.decode("utf-8"))
        if len(origin) >= 50:
            origin = origin[:49]
        # Append a special token to mark the end of the sentence.
        X.append(origin + ["<EOS/>"])
    return np.array(X)


def iter_docs(docs):
    for doc in docs:
        n_parts = int(math.ceil(float(len(doc)) / MAX_DOC_LENGTH))
        for part in range(n_parts):
            offset_begin = part * MAX_DOC_LENGTH
            offset_end = offset_begin + MAX_DOC_LENGTH
            inp = np.zeros(MAX_DOC_LENGTH, dtype=np.int32)
            out = np.zeros(MAX_DOC_LENGTH, dtype=np.int32)
            # The target is the input shifted by one character
            # (next-character prediction).
            inp[:min(offset_end - offset_begin, len(doc) - offset_begin)] = \
                doc[offset_begin:offset_end]
            out[:min(offset_end - offset_begin, len(doc) - offset_begin - 1)] = \
                doc[offset_begin + 1:offset_end + 1]
            yield inp, out

def unpack_xy(iter_obj):
    X, y = itertools.tee(iter_obj)
    return (item[0] for item in X), (item[1] for item in y)
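
# A tiny illustrative check (not in the original) of what unpack_xy does:
# tee the (input, target) pairs into two parallel generators.
_X, _y = unpack_xy(iter([(1, 2), (3, 4)]))
assert list(_X) == [1, 3] and list(_y) == [2, 4]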


# Map unicode characters (plus the <EOS/> token) to integers 1..K; characters
# seen fewer than 2 times are dropped (mapped to 0).
vocab_processor = skflow.preprocessing.VocabularyProcessor(
    MAX_DOC_LENGTH, min_frequency=2, tokenizer_fn=tokenizer)
datao = training_data(CORPUS_FILENAME)

# Build the mapping.
vocab_processor.fit(datao)

# Save the vocabulary; it is needed again for online prediction.
fp = open('ner/vocab.txt', 'w')
for k, v in vocab_processor.vocabulary_._mapping.iteritems():
    fp.write("%s\t%d\n" % (k.encode("utf-8"), v))
fp.close()

n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
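
# A minimal sketch (an assumption, not in the original) of reloading the saved
# vocabulary at prediction time: read the char -> id mapping back from the TSV.
vocab = {}
for vline in open('ner/vocab.txt'):
    ch, idx = vline.rstrip('\n').split('\t')
    vocab[ch.decode('utf-8')] = int(idx)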

## Model
HIDDEN_SIZE = 874


def get_language_model(hidden_size):
    """Returns a language model with the given hidden size."""
    def language_model(X, y):
        # Turn character indexes into one-hot vectors.
        inputs = skflow.ops.one_hot_matrix(X, n_words)
        # An embedding could be used instead:
        #   inputs = skflow.ops.categorical_variable(X,
        #       n_classes=n_words, embedding_size=50, name='words')
        # Split into a list of tensors to unroll the RNN.
        inputs = skflow.ops.split_squeeze(1, MAX_DOC_LENGTH, inputs)
        target = skflow.ops.split_squeeze(1, MAX_DOC_LENGTH, y)
        # The RNN encoder cell; GRUCell here can be swapped for an LSTM cell.
        encoder_cell = tf.nn.rnn_cell.OutputProjectionWrapper(
            tf.nn.rnn_cell.GRUCell(hidden_size), n_words)
        # Keep only the outputs; the final state is discarded.
        output, _ = tf.nn.rnn(encoder_cell, inputs, dtype=tf.float32)
        # skflow's built-in sequence classifier: the loss is the sum of the
        # per-step cross-entropies.
        return skflow.ops.sequence_classifier(output, target)
    return language_model


def exp_decay(global_step):
    return tf.train.exponential_decay(0.001, global_step, 5000, 0.5,
                                      staircase=True)

# Training model.
model_path = "ner/address_logs"

if os.path.exists(model_path):
    estimator = skflow.TensorFlowEstimator.restore(model_path)
else:
    estimator = skflow.TensorFlowEstimator(
        model_fn=get_language_model(HIDDEN_SIZE),
        n_classes=n_words,
        optimizer='Adam',
        learning_rate=exp_decay,
        steps=16273,
        batch_size=64,
        continue_training=True)

# Continuously train, reshuffling the corpus on each pass; save on Ctrl-C.
while True:
    try:
        perm = np.random.permutation(len(datao))
        datao = datao[perm]
        data = vocab_processor.transform(datao)
        X, y = unpack_xy(iter_docs(data))
        estimator.fit(X, y, logdir=model_path)
        estimator.save(model_path)
    except KeyboardInterrupt:
        estimator.save(model_path)
        break
Example #7
    net = tf.reshape(net, [-1, net_shape[1] * net_shape[2] * net_shape[3]])

    return skflow.models.logistic_regression(net, y)


# Download and load MNIST data.
mnist = input_data.read_data_sets('MNIST_data')

# Restore model if graph is saved into a folder.
if os.path.exists("models/resnet/graph.pbtxt"):
    classifier = skflow.TensorFlowEstimator.restore("models/resnet/")
else:
    # Create a new resnet classifier.
    classifier = skflow.TensorFlowEstimator(model_fn=res_net,
                                            n_classes=10,
                                            batch_size=100,
                                            steps=100,
                                            learning_rate=0.001,
                                            continue_training=True)

while True:
    # Train model and save summaries into logdir.
    classifier.fit(mnist.train.images,
                   mnist.train.labels,
                   logdir="models/resnet/")

    # Calculate accuracy.
    score = metrics.accuracy_score(
        mnist.test.labels, classifier.predict(mnist.test.images,
                                              batch_size=64))
    print('Accuracy: {0:f}'.format(score))
Example #8
    with tf.variable_scope('full_connection1'):
        net = reshape_1x2(net)
        net = skflow.ops.dnn(net, [1024], activation=act)
    return skflow.models.logistic_regression(net, y)


config = skflow.RunConfig(num_cores=8)
val_monitor = skflow.monitors.ValidationMonitor(
    X_test,
    y_test,
    early_stopping_rounds=EARLY_STOPPING_ROUNDS,
    n_classes=n_classes)

classifier = skflow.TensorFlowEstimator(model_fn=conv_model,
                                        n_classes=n_classes,
                                        batch_size=BATCH_SIZE,
                                        steps=NUMBER_OF_STEPS,
                                        optimizer='Adagrad',
                                        continue_training=True,
                                        config=config,
                                        learning_rate=INITIAL_LEARNING_RATE)

t0 = time()
print("Training:")
classifier.fit(X_train, y_train, val_monitor)

score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print("Final accuracy: {:0.9f}".format(score))
print("done in %0.3fs" % (time() - t0))
Example #9

# When running, X is a tensor of [batch size, num feats] and y is a tensor of
# [batch size, num outputs]. (This snippet's opening line was cut off; the
# function header below is reconstructed to match model_fn=custom_model.)
def custom_model(X, y):
    # This model will use a technique called batch normalization.
    X = learn.ops.batch_normalize(X, scale_after_normalization=True)

    # Now we'll pass our normalized batch to a DNN
    # We can pass a TensorFlow object as the activation function
    layers = learn.ops.dnn(X, [10, 20, 10], activation=tf.nn.relu, dropout=0.5)

    # Given encoding of DNN, take encoding of last step (e.g hidden state of the
    # neural network at the last step) and pass it as features for logistic
    # regression over the label classes.
    return learn.models.logistic_regression(layers, y)


# We need a generic TF Learn model to wrap our custom model. For regression you can use learn.TensorFlowDNNRegressor.
classifier = learn.TensorFlowEstimator(model_fn=custom_model,
                                       n_classes=y_classes,
                                       batch_size=32,
                                       steps=500,
                                       optimizer="Adam",
                                       learning_rate=0.01)


# We'll make a function for training and evaluating
def run_model(classifier, logdir=None, monitors=None):
    # Train
    classifier.fit(X_train, y_train, logdir=logdir, monitors=monitors)

    # Evaluate on dev data
    predictions = classifier.predict(X_dev)
    score = metrics.accuracy_score(y_dev, predictions)
    return score


score = run_model(classifier, 'customModelLogs',
                  monitors=learn.monitors.get_default_monitors())
print("Accuracy: %f" % score)

Example #10
vocab_processor = skflow.preprocessing.ByteProcessor(
    max_document_length=MAX_DOCUMENT_LENGTH)

x_iter = vocab_processor.transform(X_train)
y_iter = vocab_processor.transform(y_train)
xpred = np.array(list(vocab_processor.transform(X_test))[:20])
ygold = list(y_test)[:20]

PATH = '/tmp/tf_examples/ntm/'

if os.path.exists(PATH):
    translator = skflow.TensorFlowEstimator.restore(PATH)
else:
    translator = skflow.TensorFlowEstimator(model_fn=translate_model,
                                            n_classes=256,
                                            optimizer='Adam',
                                            learning_rate=0.01,
                                            batch_size=128,
                                            continue_training=True)

while True:
    translator.fit(x_iter, y_iter, logdir=PATH)
    translator.save(PATH)

    predictions = translator.predict(xpred, axis=2)
    xpred_inp = vocab_processor.reverse(xpred)
    text_outputs = vocab_processor.reverse(predictions)
    for inp_data, input_text, pred, output_text, gold in zip(
            xpred, xpred_inp, predictions, text_outputs, ygold):
        print('English: %s. French (pred): %s, French (gold): %s' %
              (input_text, output_text, gold.decode('utf-8')))
        print(inp_data, pred)
Example #11
        in_X, in_y, encoder_cell, decoder_cell)
    return skflow.ops.sequence_classifier(decoding, out_y, sampling_decoding)


def get_language_model(hidden_size):
    """Returns a language model with given hidden size."""
    def language_model(X, y):
        inputs = skflow.ops.one_hot_matrix(X, 256)
        inputs = skflow.ops.split_squeeze(1, MAX_DOC_LENGTH, inputs)
        target = skflow.ops.split_squeeze(1, MAX_DOC_LENGTH, y)
        encoder_cell = tf.nn.rnn_cell.OutputProjectionWrapper(
            tf.nn.rnn_cell.GRUCell(hidden_size), 256)
        output, _ = tf.nn.rnn(encoder_cell, inputs, dtype=tf.float32)
        return skflow.ops.sequence_classifier(output, target)

    return language_model


### Training model.

estimator = skflow.TensorFlowEstimator(
    model_fn=get_language_model(HIDDEN_SIZE),
    n_classes=256,
    optimizer='Adam',
    learning_rate=0.01,
    steps=1000,
    batch_size=64,
    continue_training=True)

estimator.fit(X, y)
Example #12
    #return skflow.models.logistic_regression(net,y)
    return predictions, loss


path = './dataset/cifar-100-python'
Xtr, Ytr, Xte, Yte = load_CIFAR100(path)
nclass = 20
batch_size = 256
steps = int(Xtr.shape[0] / batch_size)
w = weight_variable([64, nclass], 'w')
b = bias_variable([nclass, 1], 'b')
classifier = skflow.TensorFlowEstimator(model_fn=res_net,
                                        n_classes=nclass,
                                        batch_size=batch_size,
                                        steps=steps,
                                        learning_rate=0.1,
                                        continue_training=True,
                                        optimizer="Adam",
                                        verbose=1)
import time
t = time.time()
while True:
    classifier.fit(Xtr, Ytr, logdir="models/resnet/")
    # Calculate accuracy.
    score = metrics.accuracy_score(Yte, classifier.predict(Xte, batch_size=64))
    now = int((time.time() - t) / 60.0)
    print('Accuracy: {0:f} ({1:d} min elapsed)'.format(score, now))

    # Save model graph and checkpoints.
    classifier.save("models/resnet/")
    if now > 170:
        break
Example #13
        net = skflow.ops.dnn(net, [2048], activation=act)
    return skflow.models.logistic_regression(net, y)


# System configuration
config = skflow.RunConfig(num_cores=8)
val_monitor = skflow.monitors.ValidationMonitor(X_test,
                                                y_test,
                                                early_stopping_rounds=200,
                                                n_classes=n_classes)
# Set Classifier
classifier = skflow.TensorFlowEstimator(
    #model_fn=dnn_tanh,
    model_fn=conv_model,
    n_classes=n_classes,
    batch_size=128,
    steps=n_step,
    optimizer='Adagrad',
    continue_training=True,
    config=config,
    learning_rate=i_lr)

#############################################################################
# Training based on model
#pipeline = Pipeline([('scaler', scaler), ('classifier', classifier)])
#classifier = skflow.TensorFlowLinearClassifier(n_classes=n_classes, steps=1000)
#classifier = pipeline
t0 = time()
print("Test Validation: ")

classifier.fit(X_train_pca, y_train,
               val_monitor)  #, logdir='lfw_models/model_log/')