def test_tensorboard(self):
    df = TensorBoard.list()
    if not df.empty:
      for pid in df['pid']:
        TensorBoard.stop(int(pid))

    TensorBoard.start('./a')
    TensorBoard.start('./b')
    df = TensorBoard.list()
    self.assertEqual(2, len(df))
    self.assertEqual(set(df['logdir']), {'./a', './b'})
    for pid in df['pid']:
      TensorBoard.stop(int(pid))
Example #3
def serving_input_fn():
  feature_placeholders = {
    'pickuplon' : tf.placeholder(tf.float32, [None]),
    'pickuplat' : tf.placeholder(tf.float32, [None]),
    'dropofflat' : tf.placeholder(tf.float32, [None]),
    'dropofflon' : tf.placeholder(tf.float32, [None]),
    'passengers' : tf.placeholder(tf.float32, [None]),
  }
  # You can transform data here from the input format to the format expected by your model.
  features = feature_placeholders # no transformation needed
  return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
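
# If a transformation were needed, a minimal sketch might look like the
# following; the derived 'latdiff' feature is an illustrative assumption,
# not something this model uses.
def serving_input_fn_with_transform():
  feature_placeholders = {
    'pickuplon': tf.placeholder(tf.float32, [None]),
    'pickuplat': tf.placeholder(tf.float32, [None]),
    'dropofflat': tf.placeholder(tf.float32, [None]),
    'dropofflon': tf.placeholder(tf.float32, [None]),
    'passengers': tf.placeholder(tf.float32, [None]),
  }
  features = dict(feature_placeholders)
  # Hypothetical engineered feature: north-south delta between pickup and dropoff.
  features['latdiff'] = features['pickuplat'] - features['dropofflat']
  return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)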
  
def train_and_evaluate(output_dir, num_train_steps):
  estimator = tf.estimator.LinearRegressor(
      model_dir=output_dir,
      feature_columns=feature_cols)
  train_spec = tf.estimator.TrainSpec(
      input_fn=read_dataset('./taxi-train.csv', mode=tf.estimator.ModeKeys.TRAIN),
      max_steps=num_train_steps)
  exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
  eval_spec = tf.estimator.EvalSpec(
      input_fn=read_dataset('./taxi-valid.csv', mode=tf.estimator.ModeKeys.EVAL),
      steps=None,
      start_delay_secs=1,  # start evaluating after N seconds
      throttle_secs=10,    # evaluate every N seconds
      exporters=exporter)
  tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)


OUTDIR = 'taxi_trained'
TensorBoard().start(OUTDIR)


# to list TensorBoard instances
TensorBoard().list()

# to stop TensorBoard, fill in the correct pid below
TensorBoard().stop(27855)
print("Stopped TensorBoard")
Example #4

    train_spec = tf.estimator.TrainSpec(
        # 2: Call read_dataset passing in the training CSV file and the appropriate mode
        input_fn=read_dataset("train.csv", mode=tf.estimator.ModeKeys.TRAIN),
        max_steps=TRAIN_STEPS,
    )

    exporter = tf.estimator.LatestExporter("exporter", serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        # 3: Call read_dataset passing in the evaluation CSV file and the appropriate mode
        input_fn=read_dataset("eval.csv", mode=tf.estimator.ModeKeys.EVAL),
        steps=None,
        start_delay_secs=60,  # start evaluating after N seconds
        throttle_secs=EVAL_INTERVAL,  # evaluate every N seconds
        exporters=exporter,
    )

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)


# Run the model
shutil.rmtree("babyweight_trained", ignore_errors=True)  # start fresh each time
tf.summary.FileWriterCache.clear()  # ensure filewriter cache is clear for TensorBoard events file
train_and_evaluate("babyweight_trained")


from google.datalab.ml import TensorBoard

TensorBoard().start("./babyweight_trained")
for pid in TensorBoard.list()["pid"]:
    TensorBoard().stop(pid)
    print("Stopped TensorBoard with pid {}".format(pid))
Example #5

OUTDIR=gs://${BUCKET}/mnist/trained_${MODEL_TYPE}
JOBNAME=mnist_${MODEL_TYPE}_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
    --region=$REGION \
    --module-name=trainer.task \
    --package-path=${PWD}/mnistmodel/trainer \
    --job-dir=$OUTDIR \
    --staging-bucket=gs://$BUCKET \
    --scale-tier=BASIC_GPU \
    --runtime-version=$TFVERSION \
    -- \
    --output_dir=$OUTDIR \
    --train_steps=10000 --learning_rate=0.01 --train_batch_size=512 \
    --model=$MODEL_TYPE --batch_norm


# ## Monitoring training with TensorBoard
# 
# Use this cell to launch TensorBoard

# In[10]:


from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/mnist/trained_{}'.format(BUCKET, MODEL_TYPE))


# In[11]:


for pid in TensorBoard.list()['pid']:
  TensorBoard().stop(pid)
  print('Stopped TensorBoard with pid {}'.format(pid))


# Here are my results:
# 
# Model | Accuracy | Time taken | Model description | Run time parameters
# --- | :---: | :---: | --- | ---
# linear | 91.53 | 3 min | linear | 100 steps, LR=0.01, Batch=512
Example #6
    eval_spec = tf.estimator.EvalSpec(
        input_fn=read_dataset('./taxi-valid.csv',
                              mode=tf.estimator.ModeKeys.EVAL),
        steps=None,
        start_delay_secs=1,  # start evaluating after N seconds
        throttle_secs=10,  # evaluate every N seconds
        exporters=exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)


#%%
# Run training
OUTDIR = 'taxi_trained'
shutil.rmtree(OUTDIR, ignore_errors=True)  # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps=5000)

#%% [markdown]
# <h2> Monitoring with TensorBoard </h2>

#%%
from google.datalab.ml import TensorBoard
TensorBoard().start('./taxi_trained')
TensorBoard().list()

#%%
# to stop TensorBoard
for pid in TensorBoard.list()['pid']:
    TensorBoard().stop(pid)
    print('Stopped TensorBoard with pid {}'.format(pid))

#%% [markdown]
# Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
## Arguments before the `-- \` separator are for ml-engine;
## arguments after it are passed through to task.py.

# %bash
# OUTDIR=gs://${BUCKET}/babyweight/trained_model
# JOBNAME=babyweight_$(date -u +%y%m%d_%H%M%S)
# echo $OUTDIR $REGION $JOBNAME
# gsutil -m rm -rf $OUTDIR
# gcloud ml-engine jobs submit training $JOBNAME \
#   --region=$REGION \
#   --module-name=trainer.task \
#   --package-path=$(pwd)/babyweight/trainer \
#   --job-dir=$OUTDIR \
#   --staging-bucket=gs://$BUCKET \
#   --scale-tier=STANDARD_1 \
#   --runtime-version=$TFVERSION \
#   -- \
#   --bucket=${BUCKET} \
#   --output_dir=${OUTDIR} \
#   --train_examples=200000
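
## A minimal sketch of how trainer/task.py might parse the arguments after the
## `--` separator; the flag names come from the job submission above, but the
## defaults and types are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--output_dir', required=True)
parser.add_argument('--train_examples', type=int, default=5000)
parser.add_argument('--job-dir', default=None)  # appended by gcloud; safe to ignore
args, _ = parser.parse_known_args()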

## Monitor in ML Engine in the GCP console
## Visualize using TensorBoard
from google.datalab.ml import TensorBoard

TensorBoard().start("gs://{}/babyweight/trained_model".format(BUCKET))

for pid in TensorBoard.list()["pid"]:
    TensorBoard().stop(pid)
    print("Stopped TensorBoard with pid {}".format(pid))
Example #8
MODEL_NAME=sharkID
MODEL_VERSION=v1
BUCKET=gcp-demo-acme
REGION='us-west1'
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/sharkID/sharkID_trained/export/exporter | tail -1)
#echo "Run these commands one-by-one (the very first time, you'll create a model and then create a version)"
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version '1.8'


# # Test Prediction

# In[19]:


get_ipython().run_line_magic('bash', '')
# Checking prediction  
time gcloud ml-engine predict --model=sharkID --version=v3 --json-instances=../test.json
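
# For reference, --json-instances expects one JSON object per line, keyed by
# the model's serving inputs. A hypothetical sketch of building such a file;
# the 'image_bytes' key and file names are assumptions, not taken from this
# notebook.
import base64
import json

with open('test_image.jpg', 'rb') as img, open('../test.json', 'w') as out:
    instance = {'image_bytes': {'b64': base64.b64encode(img.read()).decode()}}
    out.write(json.dumps(instance) + '\n')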


# # Examine Output Through TensorBoard

# In[12]:


from google.datalab.ml import TensorBoard
TensorBoard().start('gs://'+BUCKET+'/sharkID')

Example #9
# Define your feature columns
def create_feature_cols():
  return [
    tf.feature_column.numeric_column('housing_median_age'),
    tf.feature_column.bucketized_column(tf.feature_column.numeric_column('latitude'), boundaries = np.arange(32.0, 42, 1).tolist()),
    tf.feature_column.numeric_column('avg_rooms_per_house'),
    tf.feature_column.numeric_column('avg_persons_per_room'),
    tf.feature_column.numeric_column('median_income')
  ]
  
  
  
# Launch TensorBoard
from google.datalab.ml import TensorBoard

OUTDIR = './trained_model'
TensorBoard().start(OUTDIR)


# Run the model
shutil.rmtree(OUTDIR, ignore_errors = True)
train_and_evaluate(OUTDIR, 2000)
Example #10
# In[20]:


get_ipython().run_cell_magic('bash', '', '## note: using --scale-tier=BASIC_GPU?\n#for MODEL in linear dnn cnn rnn rnn2 rnnN; do\nfor MODEL in rnn2; do\n  OUTDIR=gs://${BUCKET}/sinewaves/${MODEL}\n  JOBNAME=sines_${MODEL}_$(date -u +%y%m%d_%H%M%S)\n  gsutil -m rm -rf $OUTDIR\n  gcloud ml-engine jobs submit training $JOBNAME \\\n     --region=$REGION \\\n     --module-name=sinemodel.task \\\n     --package-path=${PWD}/sinemodel \\\n     --job-dir=$OUTDIR \\\n     --staging-bucket=gs://$BUCKET \\\n     --scale-tier=BASIC_GPU \\\n     --runtime-version=$TFVERSION \\\n     -- \\\n     --train_data_path="gs://${BUCKET}/sines/train*.csv" \\\n     --eval_data_path="gs://${BUCKET}/sines/valid*.csv"  \\\n     --output_dir=$OUTDIR \\\n     --train_steps=3000 --sequence_length=$SEQ_LEN --model=$MODEL \\\n     --eval_delay_secs=10 --min_eval_frequency=20      ## currently NOT hard-coded any longer\ndone')


# ## Monitor training with TensorBoard
# 
# Use this cell to launch TensorBoard. If TensorBoard appears blank, try refreshing after 5 minutes.

# In[21]:


from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/sinewaves'.format(BUCKET))


# In[19]:


for pid in TensorBoard.list()['pid']:
  TensorBoard().stop(pid)
  print('Stopped TensorBoard with pid {}'.format(pid))


# ## Results
# 
# Complete the table below with your own results! Then compare your results to the results in the solution notebook.
# 
# | Model  | Sequence length | # of steps | Training time   | RMSE |
# | --- | :---: | :---: | :---: | :---: |
Example #11
    eval_spec = tf.estimator.EvalSpec(
        input_fn=read_dataset('./taxi-valid.csv',
                              mode=tf.estimator.ModeKeys.EVAL),
        steps=None,
        start_delay_secs=1,  # start evaluating after N seconds
        throttle_secs=10,  # evaluate every N seconds
        exporters=exporter)

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)


# <h2> Monitoring with TensorBoard </h2>
# <br/>
# Use "refresh" in Tensorboard during training to see progress.

# In[ ]:

OUTDIR = './taxi_trained'
TensorBoard().start(OUTDIR)

# <h2>Run training</h2>

# In[ ]:

# Run training
shutil.rmtree(OUTDIR, ignore_errors=True)  # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps=2000)

# <h4> You can now shut TensorBoard down </h4>

# In[ ]:

# to list TensorBoard instances
TensorBoard().list()
Example #12
# Perform in session
x = tf.constant([3, 5, 7], name='x')  # name the tensors and the operations
y = tf.constant([1, 2, 3], name='y')
z1 = tf.add(x, y, name='z1')
z2 = x * y
z3 = z2 - z1

with tf.Session() as sess:
    # it will write the graph to the directory 'summaries'
    with tf.summary.FileWriter('summaries', sess.graph) as writer:
        a1, a3 = sess.run([z1, z3])

# Use TensorBoard to visualize
from google.datalab.ml import TensorBoard

TensorBoard().start('./summaries')





# Variables
def forward_pass(w, x):
    return tf.matmul(w, x)

def train_loop(x, niter=5):
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
        w = tf.get_variable(
            "weights", shape=(1,2),
            initializer=tf.truncated_normal_initializer(),
            trainable=True
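
# The snippet above is cut off in the source. A minimal completion of the
# variable-reuse loop, assuming each iteration reuses the same weights via
# tf.AUTO_REUSE; the loop body shown is illustrative, not the original.
def train_loop_complete(x, niter=5):
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
        w = tf.get_variable(
            "weights", shape=(1, 2),
            initializer=tf.truncated_normal_initializer(),
            trainable=True)
    preds = []
    for _ in range(niter):
        # x is assumed to have shape (2, N) so that tf.matmul(w, x) is valid.
        preds.append(forward_pass(w, x))
    return preds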
Example #13
    train_spec = tf.estimator.TrainSpec(
        input_fn=read_dataset('./taxi-train.csv', mode=tf.estimator.ModeKeys.TRAIN),
        max_steps=num_train_steps)
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=read_dataset('./taxi-valid.csv',
                              mode=tf.estimator.ModeKeys.EVAL),
        steps=None,
        start_delay_secs=1,  # start evaluating after N seconds
        throttle_secs=10,  # evaluate every N seconds
        exporters=exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)


### Monitoring with TensorBoard

OUTDIR = 'taxi_trained'
TensorBoard().start(OUTDIR)

### Run training

shutil.rmtree(OUTDIR, ignore_errors=True)  # start fresh each time
train_and_evaluate(OUTDIR, num_train_steps=2000)

### Shut TensorBoard down

# to list TensorBoard instances
TensorBoard().list()

# to stop TensorBoard, fill in the correct pid below
TensorBoard().stop(27855)
print("Stopped TensorBoard")
Example #14

get_ipython().run_cell_magic(
    'bash', ' ',
    'DECODE_FILE=data/poetry/rumi_leads.txt\ncat ${DECODE_FILE}.*.decodes')

# Some of these are still phrases and not complete sentences. This indicates that we might need to train longer or improve the model somehow. We need to diagnose the model ...
# <p>
#
# ### Diagnosing training run
#
# <p>
# Let's diagnose the training run to see what we could improve the next time around.
# (Note that this package may not be present on Jupyter -- `pip install pydatalab` if necessary)

# In[33]:

from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/poetry/model_full'.format(BUCKET))

# In[34]:

for pid in TensorBoard.list()['pid']:
    TensorBoard().stop(pid)
    print('Stopped TensorBoard with pid {}'.format(pid))

# <table>
# <tr>
# <td><img src="diagrams/poetry_loss.png"/></td>
# <td><img src="diagrams/poetry_acc.png"/></td>
# </table>
# Looking at the loss curve, it is clear that we are overfitting (note that the orange training curve is well below the blue eval curve). Both loss curves, as well as the accuracy-per-sequence curve (our key evaluation measure), plateau after 40k steps. (The red curve is a faster way of computing the evaluation metric and can be ignored.) So, how do we improve the model? We need to reduce overfitting and make sure the eval metrics keep improving for as long as the training loss keeps going down.
# <p>
# What we really need to do is to get more data, but if that's not an option, we could try to reduce the size of the network and increase the dropout regularization. We could also do hyperparameter tuning on the dropout and network sizes, as sketched below.
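#
# As a sketch of that last idea, a smaller transformer with more dropout could be registered as a custom hparams set; the names below come from tensor2tensor's `transformer_base` and the values are illustrative, not tuned for this problem.

# In[ ]:

from tensor2tensor.models import transformer
from tensor2tensor.utils import registry

@registry.register_hparams
def transformer_poetry_regularized():
    hparams = transformer.transformer_base()
    hparams.num_hidden_layers = 2    # smaller network
    hparams.hidden_size = 128
    hparams.attention_dropout = 0.4  # more dropout regularization
    hparams.relu_dropout = 0.4
    return hparams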
Example #15

FROM `cloud-training-demos.GA360_test.ga_sessions_sample`,
  UNNEST(hits) AS hits
WHERE 
  # only include hits on pages
  hits.type = "PAGE"
  AND (SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) = \"{}\"
LIMIT 1""".format(content_ids[0])
recommended_title = bq.Query(recommended_title_sql).execute().result().to_dataframe()['title'].tolist()[0]
current_title = bq.Query(current_title_sql).execute().result().to_dataframe()['title'].tolist()[0]
print("Current title: {} ".format(current_title))
print("Recommended title: {}".format(recommended_title))


# ### Tensorboard
# 
# As usual, we can monitor the performance of our training job using TensorBoard.

# In[20]:


from google.datalab.ml import TensorBoard
TensorBoard().start('content_based_model_trained')


# In[21]:


for pid in TensorBoard.list()['pid']:
  TensorBoard().stop(pid)
  print("Stopped TensorBoard with pid {}".format(pid))
Example #16
# ```
# Eval results: {'global_step': 1000, 'loss': 0.7359053, 'top_1_accuracy': 0.82954544, 'top_5_accuracy': 1.0}
# ```

# In[ ]:

get_ipython().run_cell_magic(
    'bash', '', 'gsutil ls gs://${BUCKET}/tpu/resnet/trained/export/')

# You can look at the training charts with TensorBoard:

# In[ ]:

OUTDIR = 'gs://{}/tpu/resnet/trained/'.format(BUCKET)
from google.datalab.ml import TensorBoard
TensorBoard().start(OUTDIR)

# In[ ]:

TensorBoard().stop(11531)
print("Stopped Tensorboard")

# These were the charts I got (I set smoothing to be zero):
# <img src="resnet_traineval.png" height="50"/>
# As you can see, the final blue dot (eval) is quite close to the lowest training loss, indicating that the model hasn't overfit. The top_1 accuracy on the evaluation dataset, however, is 80%, which isn't that great. More data would help.
# <img src="resnet_accuracy.png" height="50"/>

# ## Deploying and predicting with model
#
# Deploy the model:
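
# The notebook is truncated here in the source. Deployment would follow the
# same pattern as Example #8 above; a hypothetical sketch, with model and
# version names assumed:

# In[ ]:

get_ipython().run_cell_magic('bash', '', '''
MODEL_NAME=resnet
MODEL_VERSION=v1
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/tpu/resnet/trained/export/ | tail -1)
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} \\
    --origin ${MODEL_LOCATION} --runtime-version $TFVERSION
''')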