Example #1
def main():
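    # Train a GAN or EBGAN (selected by `mode`) on sorted samples from a 1-D
    # Gaussian; the generator/discriminator helpers and the `model` module
    # come from the surrounding project.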
    mode = 'ebgan'
    params = {
        'learning_rate': 0.005,
        'z_dim': 1,
        'generator': partial(linear_generator, hidden_size=10),
    }
    if mode == 'gan':
        params.update({
            'discriminator': partial(linear_discriminator, hidden_size=10),
            'loss_builder': model.make_gan_loss,
        })
    elif mode == 'ebgan':
        params.update({
            'discriminator': partial(autoencoder_discriminator, hidden_size=10),
            'loss_builder': partial(model.make_ebgan_loss, epsilon=0.0001),
        })
    tf.logging.set_verbosity(tf.logging.INFO)
    data = np.random.normal(4, 0.5, 10000).astype(np.float32)
    data.sort()
    est = learn.SKCompat(
        learn.Estimator(model_fn=model.gan_model,
                        model_dir='models/gan_intro/',
                        params=params))
    print_monitor = tf.train.LoggingTensorHook(
        ['loss_discr', 'loss_generator'], every_n_iter=100)
    est.fit(x=data,
            y=data,
            steps=10000,
            batch_size=32,
            monitors=[print_monitor])
Example #2
def __init__(self, config):
    # Note: `tflearn` here aliases tensorflow.contrib.learn (which provides
    # Estimator and SKCompat); self._model() presumably returns the model_fn.
    self.config = config
    self.model = tflearn.SKCompat(
        tflearn.Estimator(model_fn=self._model(),
                          model_dir=self.config["log_dir"]))
    self.bias = None
    self.weights = None
Example #3
def run_learn(pixels, label):
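    # Append the caller's extra (pixels, label) samples to the MNIST arrays via
    # the local concat() helper, train the convnet, then score it on the
    # extended test set.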
    cnn_dir = os.environ['VIRTUAL_ENV'] + "/mnist_convnet_model"
    mnist_dataset = mnist.load_mnist(train_dir=os.environ['VIRTUAL_ENV'] +
                                     '/MNIST-data')

    train_data = concat(mnist_dataset.train.images, pixels)
    train_labels = concat(mnist_dataset.train.labels, label)

    eval_data = concat(mnist_dataset.test.images, pixels)
    eval_labels = concat(mnist_dataset.test.labels, label)

    estimator = learn.Estimator(model_fn=cnn_model_fn, model_dir=cnn_dir)
    dataset_classifier = learn.SKCompat(estimator)

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=500)

    dataset_classifier.fit(x=train_data,
                           y=train_labels,
                           batch_size=128,
                           steps=5000,
                           monitors=[logging_hook])

    metrics = {
        "accuracy": learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                                     prediction_key="classes"),
    }

    eval_results = dataset_classifier.score(x=eval_data,
                                            y=eval_labels,
                                            metrics=metrics)
    return eval_results
Example #4
def build_model(self):
    # Build the prediction model
    self.model = tflearn.SKCompat(
        tflearn.Estimator(model_fn=lstm_model(self.timesteps,
                                              self.rnn_layer,
                                              self.dense_layer),
                          model_dir=self.model_dir))
Example #5
def train():
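    # Train a dogs-vs-cats convnet; load_dataset() is a local helper returning
    # numpy train/eval arrays and labels.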
    train_data, train_labels, eval_data, eval_labels = load_dataset(
        '/Users/sunary/Downloads/train')

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    dogvscat_classifier = learn.SKCompat(
        learn.Estimator(model_fn=cnn_model_fn,
                        model_dir='/tmp/dogvscat_convnet_model'))
    dogvscat_classifier.fit(x=train_data,
                            y=train_labels,
                            batch_size=50,
                            steps=1000,
                            monitors=[logging_hook])

    metrics = {
        "accuracy": learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                                     prediction_key="classes"),
    }
    eval_results = dogvscat_classifier.score(x=eval_data,
                                             y=eval_labels,
                                             metrics=metrics)
    print(eval_results)
Example #6
def train_autoencoder(imgs):
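    # Pretrain a linear autoencoder on the input images; SaveVisualizationHook
    # writes sample reconstructions to sample.jpg during training.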
    autoencoder_fn = partial(linear_autoencoder_discriminator,
                             output_dim=28,
                             hidden_sizes=[50],
                             encoding_dim=5)
    est = learn.SKCompat(
        model.make_autoencoder(autoencoder_fn,
                               model_dir='models/ae_mnist',
                               params={'learning_rate': 0.01}))
    save_visual = visualization.SaveVisualizationHook(
        'models/ae_mnist/sample.jpg')
    est.fit(x=imgs, y=None, steps=3000, batch_size=32, monitors=[save_visual])
Example #7
def main():
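    # Read a graph, extract symbol sequences, and train an LSTM regressor,
    # early-stopping on the validation split via ValidationMonitor.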
    clearLogFolder()
    load_russian_alphabet(ALPHABET_PATH, alphabet)
    if USE_SUPERTYPES:
        splitToSupertypes(supertypes)
    g = Graph()
    g = g.Read(data_in)
    get_all_seqs(g)

    if len(all_seq) > 0:
        print("Parsingsequences")

        X, y = processSequences(all_seq, TIMESTEPS)

        print("Merging sequences")
        Xm, ym = mergeSeqs(X, y)

        print("Create regressor")
        regressor = learn.SKCompat(
            learn.Estimator(model_fn=lstm_model(TIMESTEPS,
                                                RNN_LAYERS,
                                                DENSE_LAYERS,
                                                optimizer="Adam"),
                            model_dir=LOG_DIR))

        # create a lstm instance and validation monitor
        validation_monitor = learn.monitors.ValidationMonitor(
            Xm['val'],
            ym['val'],
            every_n_steps=PRINT_STEPS,
            early_stopping_rounds=1000)

        print("fit regressor")

        regressor.fit(Xm['train'],
                      ym['train'],
                      monitors=[validation_monitor],
                      batch_size=BATCH_SIZE,
                      steps=TRAINING_STEPS)

        print("predicting")

        predicted = regressor.predict(Xm['test'])
        # rmse = np.sqrt(((predicted - ym['test']) ** 2).mean(axis=0))

        score = mean_squared_error(predicted, ym['test'])
        hited = hitpoint(predicted, ym['test'])
        print("MSE: %f" % score)
        print("hitpoint:", hited)
Example #8
def main():
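  # Train a GAN or EBGAN on MNIST digits; with --pretrain, the autoencoder
  # discriminator is initialized from the models/ae_mnist checkpoint.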
  tf.logging.set_verbosity(tf.logging.INFO)
  
  # Load MNIST data.
  mnist_data = learn.datasets.load_dataset('mnist')

  # Select subset of images.
  mnist_class = None
  imgs = mnist_data.train.images
  if mnist_class is not None:
      imgs = np.array([x for idx, x in enumerate(mnist_data.train.images) if
        mnist_data.train.labels[idx] == mnist_class])

  if FLAGS.pretrain:
    images.train_autoencoder(imgs)

  # Configure.
  params = {
    'learning_rate': 0.0005,
    'z_dim': 100,
    'generator': partial(images.conv_generator, output_dim=28, n_filters=64),
  }
  if FLAGS.mode == 'gan':
    params.update({
      'discriminator': partial(images.conv_discriminator, hidden_size=10),
      'loss_builder': model.make_gan_loss
    })
  elif FLAGS.mode == 'ebgan':
    pretrained = None
    if FLAGS.pretrain:
        pretrained = 'models/ae_mnist'
    params.update({
      'discriminator': partial(images.linear_autoencoder_discriminator, output_dim=28,
          hidden_sizes=[50], encoding_dim=5, pretrained=pretrained),
      'loss_builder': partial(model.make_ebgan_loss, epsilon=0.05)
    })
  est = learn.SKCompat(learn.Estimator(
      model_fn=model.gan_model, model_dir='models/gan_mnist/', params=params))

  # Setup monitors.
  print_monitor = tf.train.LoggingTensorHook(['loss_discr', 'loss_generator'],
    every_n_iter=100)
  save_visual = visualization.SaveVisualizationHook('models/gan_mnist/sample.jpg')

  # Train for a bit.
  est.fit(x=imgs, y=None, steps=50000, batch_size=32, 
          monitors=[print_monitor, save_visual])
Example #9
def main(_):
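    # Train and evaluate the MNIST digit classifier; RunConfig checkpoints
    # every 100 steps and keeps the 5 most recent checkpoints.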
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_dir',
                        type=str,
                        default='save',
                        help='dir to save checkpoint in')
    args = parser.parse_args()

    tf.logging.set_verbosity(tf.logging.INFO)
    mnist = learn.datasets.load_dataset("mnist")
    train_features = mnist.train.images  # Returns np.array
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images  # Returns np.array
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    # Train the model
    runConfig = tf.contrib.learn.RunConfig(save_summary_steps=100,
                                           save_checkpoints_secs=None,
                                           save_checkpoints_steps=100,
                                           keep_checkpoint_max=5,
                                           keep_checkpoint_every_n_hours=10000)
    estimator = learn.Estimator(model_fn=cnn_hand_digit_classifier_model,
                                model_dir=args.checkpoint_dir,
                                config=runConfig)
    cnn_hand_digit_classifier = learn.SKCompat(estimator)

    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    cnn_hand_digit_classifier.fit(x=train_features,
                                  y=train_labels,
                                  batch_size=500,
                                  steps=1000,
                                  monitors=[logging_hook])

    metrics = {
        "accuracy": learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                                     prediction_key="classes"),
    }

    eval_results = cnn_hand_digit_classifier.score(x=eval_data,
                                                   y=eval_labels,
                                                   metrics=metrics)
    print(eval_results)
Example #10
def main(unused_argv):
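    # Train the convnet on one data shard read by the local getImage() helper.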
    # Load training and eval data
    mnist = learn.datasets.load_dataset("mnist")
    print(mnist)
    # train_data = mnist.train.images  # Returns np.array
    # train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    # eval_data = mnist.test.images  # Returns np.array
    # eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    train_labels, train_data = getImage("train-00000-of-00001")

    # train_data =
    # train_labels =
    # # eval_data =
    # eval_labels =

    # Create the Estimator
    data_classifier = learn.Estimator(model_fn=cnn_model_fn,
                                      model_dir="/tmp/convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=10)

    # Train the model
    learn.SKCompat(data_classifier).fit(x=train_data,
                                        y=train_labels,
                                        batch_size=1,
                                        steps=21,
                                        monitors=[logging_hook])

    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy": learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                                     prediction_key="classes"),
    }
Example #11
def main(_):
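    # Evaluation only: restore the classifier from --checkpoint_dir and score
    # it on the MNIST test set.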
    tf.logging.set_verbosity(tf.logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_dir', type=str, default='save',
                        help='dir to save checkpoint in')
    args = parser.parse_args()
    mnist = learn.datasets.load_dataset("mnist")
    eval_data = mnist.test.images  # Returns np.array
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)

    estimator = learn.Estimator(
        model_fn=cnn_hand_digit_classifier_model,
        model_dir=args.checkpoint_dir,
    )

    cnn_hand_digit_classifier = learn.SKCompat(estimator)

    metrics = {
        "accuracy": learn.MetricSpec(metric_fn=tf.metrics.accuracy,
                                     prediction_key="classes"),
    }

    eval_results = cnn_hand_digit_classifier.score(x=eval_data,
                                                   y=eval_labels,
                                                   metrics=metrics)
    print(eval_results)
Example #12
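# Excerpt: the tail of the influx() generator used below, which parses
# tf.SequenceExample protos and yields (data, target) pairs.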
            v = np.array(s.feature_lists.feature_list[column].feature[0].
                         float_list.value,
                         dtype=np.float32)
            data = np.zeros((10, ), dtype=np.float32)
            data[:v.shape[0] - 1] = v[:v.shape[0] - 1]
            data = np.array(data).reshape([10, -1])
            target = np.array(v[-1])
            yield data, target


if __name__ == '__main__':
    with tf.Session() as sess:
        db = "tensorflowdb"
        query = "select wet_bulb_temp from qclcd where wban = '14920' and time > now() - 30d group by wban"
        column = "column/wet_bulb_temp"
        regressor = tflearn.SKCompat(tflearn.Estimator(model_fn=model))

        inputs = []
        targets = []
        for data, target in influx(db, query, column):
            inputs.append(data)
            targets.append(target)

        inputs = np.array(inputs)
        print(inputs.shape)
        targets = np.array(targets)
        print(targets.shape)
        regressor.fit(inputs, targets, batch_size=20, steps=20)
        predicted = regressor.predict(inputs)
        #not used in this example but used for seeing deviations
        rmse = np.sqrt(((predicted - targets)**2).mean(axis=0))
Example #13
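# Excerpt: the inner model_fn built by an lstm_model(time_steps, rnn_layers,
# dense_layers) factory; the factory's return value and its usage follow.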
    def _lstm_model(X, y):
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells(rnn_layers),
                                                   state_is_tuple=True)
        x_ = tf.unstack(X, axis=1, num=num_units)
        output, layers = tf.contrib.rnn.static_rnn(stacked_lstm,
                                                   x_,
                                                   dtype=dtypes.float32)
        output = dnn_layers(output[-1], dense_layers)
        prediction, loss = tflearn.models.linear_regression(output, y)
        train_op = tf.contrib.layers.optimize_loss(
            loss,
            tf.contrib.framework.get_global_step(),
            optimizer=optimizer,
            learning_rate=learning_rate)
        return prediction, loss, train_op

    return _lstm_model


log_dir = './ops_logs/sin'
time_steps = 3
rnn_layers = [{'num_units': 5}]
dense_layers = None
training_steps = 10000
print_steps = training_steps // 10
batch_size = 100

regressor = learn.SKCompat(
    learn.Estimator(model_fn=lstm_model(time_steps, rnn_layers, dense_layers),
                    model_dir=log_dir))
Example #14
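# Excerpt: the tail of a model_fn that fits XOR with a small dense network;
# the script below trains and evaluates it on the four XOR input rows.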
        y = tf.layers.dense(h, 1, tf.nn.sigmoid)

    loss = tf.losses.mean_squared_error(labels, y)

    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(), 0.1, "Adam")

    predictions = y

    return model_fn_lib.ModelFnOps(mode=mode,
                                   loss=loss,
                                   predictions=predictions,
                                   train_op=train_op)


estimator = learn.SKCompat(
    learn.Estimator(model_fn=cnn_model_fn, model_dir="test"))

data = np.asarray([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]],
                  dtype=np.float32)
labels = np.asarray([[[0.0]], [[1.0]], [[1.0]], [[0.0]]], dtype=np.float32)

tf.logging.set_verbosity(tf.logging.INFO)
estimator.fit(x=data, y=labels, steps=2000)

eval_results = estimator.score(x=data, y=labels)
print(eval_results)

x = np.asarray([[0.0, 1.0]], dtype=np.float32)
print(estimator.predict(x))
Example #15
def predict(pixels):
    cnn_dir = os.environ['VIRTUAL_ENV'] + "/mnist_convnet_model"
    estimator = learn.Estimator(model_fn=cnn_model_fn, model_dir=cnn_dir)
    mnist_classifier = learn.SKCompat(estimator)

    return mnist_classifier.predict(x=pixels, batch_size=1)
Example #16
list(vocab_processor.fit_transform(tmp))

global n_words

# Process the vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(
    MAX_DOCUMENT_LENGTH, min_frequency=MIN_WORD_FREQUENCY)
x_train = np.array(list(vocab_processor.fit_transform(train_data)))
x_test = np.array(list(vocab_processor.transform(test_data)))
n_words = len(vocab_processor.vocabulary_)

print("Total words: %d" % n_words)


cate_dic = {'technology': 1, 'car': 2, 'entertainment': 3, 'military': 4, 'sports': 5}
train_target = list(map(lambda x: cate_dic[x], train_target))
test_target = list(map(lambda x: cate_dic[x], test_target))

y_train = pandas.Series(train_target)
y_test = pandas.Series(test_target)

# Build the model
classifier = learn.SKCompat(learn.Estimator(model_fn=cnn_model))
classifier.fit(x_train, y_train, steps=1000)
y_predicted = classifier.predict(x_test)['class']
score = metrics.accuracy_score(y_test, y_predicted)

print("Accuracy: {0:f}".format(score))


Example #17

import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn

tf.logging.set_verbosity(tf.logging.WARN)

print("Loading mnist database")

# Training data (55k images)
mnist = learn.datasets.load_dataset("mnist")
images_training = mnist.train.images
labels_training = np.asarray(mnist.train.labels, dtype=np.int32)

# Test data (10k images)
images_test = mnist.test.images
labels_test = np.asarray(mnist.test.labels, dtype=np.int32)

# You can print some of the test images using display_test_image
# display_test_image(0)

# Build and train our classifier
feature_columns = learn.infer_real_valued_columns_from_input(images_training)
tensorflow_classifier = learn.LinearClassifier(n_classes=10,
                                               feature_columns=feature_columns)

classifier = learn.SKCompat(tensorflow_classifier)

classifier.fit(x=images_training, y=labels_training, batch_size=100, steps=1000)

# Evaluate accuracy
eval_results = classifier.score(images_test, labels_test)
print("Accuracy: %f" % eval_results['accuracy'])
Example #18
from data_processing import load_csvdata
import random
import math
import xlrd

import numpy as np
from tensorflow.contrib import learn

LOG_DIR = 'C:/Users/chak282/Downloads'
TIMESTEPS = 3
RNN_LAYERS = [{'num_units': 5}]
DENSE_LAYERS = None
TRAINING_STEPS = 10000
PRINT_STEPS = TRAINING_STEPS // 10
BATCH_SIZE = 100

regressor = learn.SKCompat(
    learn.Estimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS),
                    model_dir=LOG_DIR))

workbook = xlrd.open_workbook('RNN_data.xlsx')
sheet1 = workbook.sheet_by_name('RNN_data')
data = np.zeros(shape=sheet1.nrows)

for index1 in range(0, sheet1.nrows):
    data[index1] = sheet1.cell_value(index1, 1)

X, y = load_csvdata(data, TIMESTEPS, seperate=False)

# create a lstm instance and validation monitor
validation_monitor = learn.monitors.ValidationMonitor(
    X['val'], y['val'], every_n_steps=PRINT_STEPS, early_stopping_rounds=1000)
# print(X['train'])
Example #19
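# Excerpt: opens with the tail of a pickle.dump() call saving the prepared Y
# targets; LSTM regressor setup, training, and validation follow.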
        Y,
        open(
            data_path + "/y_set" + str(lstm.IN_TIMESTEPS) +
            str(lstm.OUT_TIMESTEPS_RANGE[-1]) + ".pkl", "wb"))
    print("Save data successfully!")

## build the lstm model
model_fn = lstm_model()
config = tf.contrib.learn.RunConfig(
    log_step_count_steps=200,
    save_checkpoints_steps=VALIDATION_STEPS // 2)

estimator = learn.Estimator(model_fn=model_fn,
                            model_dir=LOG_DIR,
                            config=config)
regressor = learn.SKCompat(estimator)

## create a validation monitor
validation_monitor = learn.monitors.ValidationMonitor(
    X['val'], Y['val'], every_n_steps=VALIDATION_STEPS)

## fit the train dataset
# TRAINING_STEPS = 1
regressor.fit(X['train'],
              Y['train'],
              monitors=[validation_monitor],
              batch_size=BATCH_SIZE,
              steps=TRAINING_STEPS)

#todo: test average predicted error each step
#todo: add experiment, joint angles error
Example #20
def main():
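    # Train a GAN/EBGAN to emit sorted integer sequences, then measure the
    # fraction of generated sequences that come out already sorted.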
    # Configure.
    vocab_size = 10
    embed_dim = 10
    length = 4
    hidden_size = 10
    params = {
        'learning_rate': 0.0005,
        'z_dim': 10,
        'feature_processor': partial(sequence.embed_features,
                                     vocab_size=vocab_size,
                                     embed_dim=embed_dim),
        'generated_postprocess': sequence.outbed_generated,
        'generator': partial(sequence.sequence_generator,
                             length=length,
                             hidden_size=hidden_size),
    }
    if FLAGS.mode == 'gan':
        params.update({
            'discriminator': partial(sequence.sequence_discriminator,
                                     length=length,
                                     hidden_size=hidden_size),
            'loss_builder': model.make_gan_loss,
        })
    elif FLAGS.mode == 'ebgan':
        params.update({
            'discriminator': partial(sequence.sequence_autoencoder_discriminator,
                                     length=length,
                                     hidden_size=hidden_size),
            'loss_builder': partial(model.make_ebgan_loss, epsilon=0.05),
        })
    tf.logging.set_verbosity(tf.logging.INFO)
    est = learn.SKCompat(
        learn.Estimator(model_fn=model.gan_model,
                        model_dir='models/gan_sorting/',
                        params=params))

    # Generate data.
    data = np.random.randint(0, vocab_size, (1000, length))
    data.sort()
    print([data[idx, :] for idx in range(5)])

    # Setup monitors.
    print_monitor = tf.train.LoggingTensorHook(
        ['loss_discr', 'loss_generator', 'Embed_1/generated_ids'],
        every_n_iter=100)

    # Train for a bit.
    est.fit(x=data,
            y=None,
            steps=10000,
            batch_size=32,
            monitors=[print_monitor])

    ## Evaluate.
    output = est.predict(x=np.zeros([1000, length], dtype=np.int32))

    # Compute accuracy.
    actual = output.copy()
    actual.sort()
    print('\n'.join([str(output[idx, :]) for idx in range(10)]))
    print("Accuracy: %f" %
          (float(np.sum(np.all(output == actual, 1))) / len(output)))
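
All of the examples above share the same flow: wrap a learn.Estimator in
learn.SKCompat, then drive it with scikit-learn style fit/score/predict calls
on numpy arrays. Below is a minimal self-contained sketch of that flow. It
assumes TensorFlow 1.x (where tf.contrib.learn and SKCompat still exist); the
toy model_fn and synthetic data are illustrative, not taken from any example
above.

import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib


def toy_model_fn(features, labels, mode):
    # A single dense unit: plain linear regression on the input features.
    predictions = tf.layers.dense(features, 1)
    loss = tf.losses.mean_squared_error(labels, predictions)
    train_op = tf.contrib.layers.optimize_loss(
        loss, tf.contrib.framework.get_global_step(),
        learning_rate=0.1, optimizer='SGD')
    return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
                                   loss=loss, train_op=train_op)


regressor = learn.SKCompat(learn.Estimator(model_fn=toy_model_fn))
x = np.random.rand(256, 2).astype(np.float32)
y = (x[:, :1] + x[:, 1:]).astype(np.float32)  # target: sum of the two inputs
regressor.fit(x=x, y=y, batch_size=32, steps=200)
print(regressor.score(x=x, y=y))  # returns a dict including the final loss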