Example no. 1
def main(_):
  tfe.enable_eager_execution()
  # Ground-truth constants.
  true_w = [[-2.0], [4.0], [1.0]]
  true_b = [0.5]
  noise_level = 0.01

  # Training constants.
  batch_size = 64
  learning_rate = 0.1

  print("True w: %s" % true_w)
  print("True b: %s\n" % true_b)

  model = LinearModel()
  dataset = synthetic_dataset(true_w, true_b, noise_level, batch_size, 20)

  device = "gpu:0" if tfe.num_gpus() else "cpu:0"
  print("Using device: %s" % device)
  with tf.device(device):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    fit(model, dataset, optimizer, verbose=True, logdir=FLAGS.logdir)

  print("\nAfter training: w = %s" % model.variables[0].numpy())
  print("\nAfter training: b = %s" % model.variables[1].numpy())
Example no. 2
def main(_):
  tfe.enable_eager_execution()

  (device, data_format) = ('/gpu:0', 'channels_first')
  if FLAGS.no_gpu or tfe.num_gpus() <= 0:
    (device, data_format) = ('/cpu:0', 'channels_last')
  print('Using device %s, and data format %s.' % (device, data_format))

  # Load the datasets
  train_ds = dataset.train(FLAGS.data_dir).shuffle(60000).batch(
      FLAGS.batch_size)
  test_ds = dataset.test(FLAGS.data_dir).batch(FLAGS.batch_size)

  # Create the model and optimizer
  model = mnist.Model(data_format)
  optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum)

  if FLAGS.output_dir:
    # Create directories to which summaries will be written
    # tensorboard --logdir=<output_dir>
    # can then be used to see the recorded summaries.
    train_dir = os.path.join(FLAGS.output_dir, 'train')
    test_dir = os.path.join(FLAGS.output_dir, 'eval')
    tf.gfile.MakeDirs(FLAGS.output_dir)
  else:
    train_dir = None
    test_dir = None
  summary_writer = tf.contrib.summary.create_file_writer(
      train_dir, flush_millis=10000)
  test_summary_writer = tf.contrib.summary.create_file_writer(
      test_dir, flush_millis=10000, name='test')
  checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
  step_counter = tf.train.get_or_create_global_step()
  checkpoint = tfe.Checkpoint(
      model=model, optimizer=optimizer, step_counter=step_counter)
  # Restore variables on creation if a checkpoint exists.
  checkpoint.restore(tf.train.latest_checkpoint(FLAGS.checkpoint_dir))
  # Train and evaluate for 10 epochs.
  with tf.device(device):
    for _ in range(10):
      start = time.time()
      with summary_writer.as_default():
        train(model, optimizer, train_ds, step_counter, FLAGS.log_interval)
      end = time.time()
      print('\nTrain time for epoch #%d (%d total steps): %f' %
            (checkpoint.save_counter.numpy() + 1,
             step_counter.numpy(),
             end - start))
      with test_summary_writer.as_default():
        test(model, test_ds)
      checkpoint.save(checkpoint_prefix)
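
# The train and test helpers are defined elsewhere in this example. A minimal
# sketch of train, consistent with the calls above (the loss function and
# summary cadence are assumptions):
def train(model, optimizer, dataset, step_counter, log_interval):
  """Runs one epoch over dataset, updating model and step_counter."""
  for (batch, (images, labels)) in enumerate(dataset):
    with tf.contrib.summary.record_summaries_every_n_global_steps(
        10, global_step=step_counter):
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        loss_value = tf.losses.sparse_softmax_cross_entropy(
            labels=labels, logits=logits)
      tf.contrib.summary.scalar('loss', loss_value)
      grads = tape.gradient(loss_value, model.variables)
      optimizer.apply_gradients(
          zip(grads, model.variables), global_step=step_counter)
      if log_interval and batch % log_interval == 0:
        print('Batch #%d\tLoss: %.6f' % (batch, loss_value))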
Example no. 3
def main(_):
  tfe.enable_eager_execution()

  (device, data_format) = ('/gpu:0', 'channels_first')
  if FLAGS.no_gpu or tfe.num_gpus() <= 0:
    (device, data_format) = ('/cpu:0', 'channels_last')
  print('Using device %s, and data format %s.' % (device, data_format))

  # Load the datasets
  (train_ds, test_ds) = load_data(FLAGS.data_dir)
  train_ds = train_ds.shuffle(60000).batch(FLAGS.batch_size)

  # Create the model and optimizer
  model = MNISTModel(data_format)
  optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum)

  if FLAGS.output_dir:
    train_dir = os.path.join(FLAGS.output_dir, 'train')
    test_dir = os.path.join(FLAGS.output_dir, 'eval')
    tf.gfile.MakeDirs(FLAGS.output_dir)
  else:
    train_dir = None
    test_dir = None
  summary_writer = tf.contrib.summary.create_file_writer(
      train_dir, flush_millis=10000)
  test_summary_writer = tf.contrib.summary.create_file_writer(
      test_dir, flush_millis=10000, name='test')
  checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')

  with tf.device(device):
    for epoch in range(1, 11):
      with tfe.restore_variables_on_create(
          tf.train.latest_checkpoint(FLAGS.checkpoint_dir)):
        global_step = tf.train.get_or_create_global_step()
        start = time.time()
        with summary_writer.as_default():
          train_one_epoch(model, optimizer, train_ds, FLAGS.log_interval)
        end = time.time()
        print('\nTrain time for epoch #%d (global step %d): %f' % (
            epoch, global_step.numpy(), end - start))
      with test_summary_writer.as_default():
        test(model, test_ds)
      all_variables = (
          model.variables
          + optimizer.variables()
          + [global_step])
      tfe.Saver(all_variables).save(
          checkpoint_prefix, global_step=global_step)
Example no. 4
"""
cs20.stanford.edu
Chip Huyen ([email protected]) & Akshay Agrawal ([email protected])
Lecture 04
"""

import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe

import utils
from examples import word2vec_utils

tfe.enable_eager_execution()

# Model hyperparameters
VOCAB_SIZE = 50000
BATCH_SIZE = 128
EMBED_SIZE = 128            # dimension of the word embedding vectors
SKIP_WINDOW = 1             # the context window
NUM_SAMPLED = 64            # number of negative examples to sample
LEARNING_RATE = 1.0
NUM_TRAIN_STEPS = 100000
VISUAL_FLD = 'visualization'
SKIP_STEP = 5000

# Parameters for downloading data
DOWNLOAD_URL = 'http://mattmahoney.net/dc/text8.zip'
EXPECTED_BYTES = 31344016
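
# The rest of the lecture's skip-gram model is not shown here. A minimal
# sketch of how these constants typically feed tf.nn.nce_loss (variable names
# and initializers below are assumptions):
embed_matrix = tfe.Variable(
    tf.random_uniform([VOCAB_SIZE, EMBED_SIZE], -1.0, 1.0))
nce_weight = tfe.Variable(tf.truncated_normal(
    [VOCAB_SIZE, EMBED_SIZE], stddev=1.0 / EMBED_SIZE ** 0.5))
nce_bias = tfe.Variable(tf.zeros([VOCAB_SIZE]))

def nce_loss_fn(center_words, target_words):
    """center_words: [batch] int ids; target_words: [batch, 1] int ids."""
    embed = tf.nn.embedding_lookup(embed_matrix, center_words)
    return tf.reduce_mean(tf.nn.nce_loss(
        weights=nce_weight, biases=nce_bias, labels=target_words,
        inputs=embed, num_sampled=NUM_SAMPLED, num_classes=VOCAB_SIZE))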
Example no. 5
def main(argv):
  parser = MNISTEagerArgParser()
  flags = parser.parse_args(args=argv[1:])

  tfe.enable_eager_execution()

  # Automatically determine device and data_format
  (device, data_format) = ('/gpu:0', 'channels_first')
  if flags.no_gpu or tfe.num_gpus() <= 0:
    (device, data_format) = ('/cpu:0', 'channels_last')
  # If data_format is defined in FLAGS, overwrite automatically set value.
  if flags.data_format is not None:
    data_format = flags.data_format
  print('Using device %s, and data format %s.' % (device, data_format))

  # Load the datasets
  train_ds = mnist_dataset.train(flags.data_dir).shuffle(60000).batch(
      flags.batch_size)
  test_ds = mnist_dataset.test(flags.data_dir).batch(flags.batch_size)

  # Create the model and optimizer
  model = mnist.create_model(data_format)
  optimizer = tf.train.MomentumOptimizer(flags.lr, flags.momentum)

  # Create file writers for writing TensorBoard summaries.
  if flags.output_dir:
    # Create directories to which summaries will be written
    # tensorboard --logdir=<output_dir>
    # can then be used to see the recorded summaries.
    train_dir = os.path.join(flags.output_dir, 'train')
    test_dir = os.path.join(flags.output_dir, 'eval')
    tf.gfile.MakeDirs(flags.output_dir)
  else:
    train_dir = None
    test_dir = None
  summary_writer = tf.contrib.summary.create_file_writer(
      train_dir, flush_millis=10000)
  test_summary_writer = tf.contrib.summary.create_file_writer(
      test_dir, flush_millis=10000, name='test')

  # Create and restore checkpoint (if one exists on the path)
  checkpoint_prefix = os.path.join(flags.model_dir, 'ckpt')
  step_counter = tf.train.get_or_create_global_step()
  checkpoint = tfe.Checkpoint(
      model=model, optimizer=optimizer, step_counter=step_counter)
  # Restore variables on creation if a checkpoint exists.
  checkpoint.restore(tf.train.latest_checkpoint(flags.model_dir))

  # Train and evaluate for a set number of epochs.
  with tf.device(device):
    for _ in range(flags.train_epochs):
      start = time.time()
      with summary_writer.as_default():
        train(model, optimizer, train_ds, step_counter, flags.log_interval)
      end = time.time()
      print('\nTrain time for epoch #%d (%d total steps): %f' %
            (checkpoint.save_counter.numpy() + 1,
             step_counter.numpy(),
             end - start))
      with test_summary_writer.as_default():
        test(model, test_ds)
      checkpoint.save(checkpoint_prefix)
Example no. 6
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 21 16:48:51 2018

@author: xsxsz
"""

import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()
a = tf.constant(2)
b = tf.constant(3)
c = a + b
d = a * b

print('a=%i' % a)
print('-----------')
print('b=%i' % b)
print('-----------')
print('c=%i' % c)
print('-----------')
print('d=%i' % d)
print('-----------')
Example no. 7
from __future__ import print_function

import tensorflow as tf
import tensorflow.contrib.eager as tfe
import numpy as np
# Set Eager API
tfe.enable_eager_execution()  # enable eager execution (graph mode is the default)
# In eager mode, convert a tensor to numpy with a.numpy() (a is a tensor),
# and numpy to tensor with tf.convert_to_tensor(a) (a is a numpy array).
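# For example (a minimal sketch; variable names are illustrative):
t = tf.constant([1.0, 2.0])
n = t.numpy()                   # EagerTensor -> numpy.ndarray
t2 = tf.convert_to_tensor(n)    # numpy.ndarray -> tf.Tensor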

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)


def pad_image(image, pad_h=32, pad_w=32):
    h, w = image.shape[:2]
    assert h < pad_h
    assert w < pad_w

    err_h = pad_h - h
    top_pad = err_h // 2
    bottom_pad = err_h - top_pad

    err_w = pad_w - w
    left_pad = err_w // 2
    right_pad = err_w - left_pad

    padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
    image = np.pad(image, padding, mode='constant', constant_values=0)
    return image
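
# Example usage (a sketch): pad one 28x28 MNIST digit (reshaped to HWC) out to
# the default 32x32.
img = mnist.train.images[0].reshape(28, 28, 1)
padded = pad_image(img)
print(padded.shape)  # (32, 32, 1)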
Example no. 8
def main(argv):
    parser = MNISTEagerArgParser()
    flags = parser.parse_args(args=argv[1:])

    tfe.enable_eager_execution()

    # Automatically determine device and data_format
    (device, data_format) = ('/gpu:0', 'channels_first')
    if flags.no_gpu or tfe.num_gpus() <= 0:
        (device, data_format) = ('/cpu:0', 'channels_last')
    # If data_format is defined in FLAGS, overwrite automatically set value.
    if flags.data_format is not None:
        data_format = flags.data_format
    print('Using device %s, and data format %s.' % (device, data_format))

    # Load the datasets
    train_ds = mnist_dataset.train(flags.data_dir).shuffle(60000).batch(
        flags.batch_size)
    test_ds = mnist_dataset.test(flags.data_dir).batch(flags.batch_size)

    # Create the model and optimizer
    model = mnist.create_model(data_format)
    optimizer = tf.train.MomentumOptimizer(flags.lr, flags.momentum)

    # Create file writers for writing TensorBoard summaries.
    if flags.output_dir:
        # Create directories to which summaries will be written
        # tensorboard --logdir=<output_dir>
        # can then be used to see the recorded summaries.
        train_dir = os.path.join(flags.output_dir, 'train')
        test_dir = os.path.join(flags.output_dir, 'eval')
        tf.gfile.MakeDirs(flags.output_dir)
    else:
        train_dir = None
        test_dir = None
    summary_writer = tf.contrib.summary.create_file_writer(train_dir,
                                                           flush_millis=10000)
    test_summary_writer = tf.contrib.summary.create_file_writer(
        test_dir, flush_millis=10000, name='test')

    # Create and restore checkpoint (if one exists on the path)
    checkpoint_prefix = os.path.join(flags.model_dir, 'ckpt')
    step_counter = tf.train.get_or_create_global_step()
    checkpoint = tfe.Checkpoint(model=model,
                                optimizer=optimizer,
                                step_counter=step_counter)
    # Restore variables on creation if a checkpoint exists.
    checkpoint.restore(tf.train.latest_checkpoint(flags.model_dir))

    # Train and evaluate for a set number of epochs.
    with tf.device(device):
        for _ in range(flags.train_epochs):
            start = time.time()
            with summary_writer.as_default():
                train(model, optimizer, train_ds, step_counter,
                      flags.log_interval)
            end = time.time()
            print('\nTrain time for epoch #%d (%d total steps): %f' %
                  (checkpoint.save_counter.numpy() + 1, step_counter.numpy(),
                   end - start))
            with test_summary_writer.as_default():
                test(model, test_ds)
            checkpoint.save(checkpoint_prefix)
Example no. 9
from __future__ import absolute_import, division, print_function, unicode_literals
# from keras import backend as K
import tensorflow as tf
import keras
import numpy as np
import tensorflow.contrib.eager as tfe
from keras.preprocessing import sequence
from sklearn.model_selection import train_test_split
import time
from keras.initializers import Constant
from helpers import loadGloveModel, evaluate_acc, hotflip_attack, get_pred, attack, attack_dataset

print(tf.__version__)
if __name__ == '__main__':
    # 1.0 get the data
    tfe.enable_eager_execution(device_policy=tfe.DEVICE_PLACEMENT_SILENT)
    imdb = keras.datasets.imdb
    num_features = 20000
    (train_data,
     train_labels), (test_data,
                     test_labels) = imdb.load_data(num_words=num_features)
    print("Training entries: {}, labels: {}".format(len(train_data),
                                                    len(train_labels)))
    maxlen = 80
    x_train = sequence.pad_sequences(train_data, maxlen=maxlen)
    x_test = sequence.pad_sequences(test_data, maxlen=maxlen)
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)
    # 1.1 get the word indices
    word_index = imdb.get_word_index()
    # The first indices are reserved
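    # A sketch of the standard Keras IMDB convention (an assumption here, since
    # the original is cut off): shift indices by 3 for the reserved tokens.
    word_index = {k: (v + 3) for k, v in word_index.items()}
    word_index['<PAD>'] = 0
    word_index['<START>'] = 1
    word_index['<UNK>'] = 2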
Example no. 10

# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load
import os
import glob
from pathlib import Path
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()  # enable eager mode before doing any operation

from keras.preprocessing import image
from skimage.io import imread, imsave, imshow
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split

np.random.seed(111)
color = sns.color_palette()
get_ipython().run_line_magic('matplotlib', 'inline')

# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory

from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
Example no. 11
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import sys
import cv2

from svsutils import Slide

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tfe.enable_eager_execution(config=config)

print('\nslide at 5x')
slide_path = '/home/nathan/data/ccrcc/TCGA_KIRC/'
slide_path += 'TCGA-A3-3346-01Z-00-DX1.95280216-fd71-4a03-b452-6e3d667f2542.svs'
preprocess_fn = lambda x: (x * (2 / 255.)) - 1
s = Slide(slide_path=slide_path,
          process_mag=5,
          process_size=128,
          preprocess_fn=preprocess_fn)
s.print_info()

ds = tf.data.Dataset.from_generator(generator=s.generator,
                                    output_types=tf.float32)
ds = tfe.Iterator(ds)

for idx, x in enumerate(ds):
    print(x.shape, x.dtype)  # print() call: the file imports print_function
    # cv2.imwrite('debug/{}.jpg'.format(idx), x.numpy()[:,:,::-1])
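
# Optionally (a sketch, not in the original): declaring the tile shape up front
# helps shape inference; (128, 128, 3) is an assumption based on
# process_size=128 and RGB tiles.
# ds = tf.data.Dataset.from_generator(generator=s.generator,
#                                     output_types=tf.float32,
#                                     output_shapes=tf.TensorShape([128, 128, 3]))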
Example no. 12
'''
can be converted to a graph that can be further optimized and/or extracted
for deployment in production without changing code. " - Rajat Monga
'''
from __future__ import absolute_import, division, print_function

import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe

# Set Eager API
print("Setting Eager mode...")
# Note: tf.enable_eager_execution must be called at program startup.
# If you hit that error (e.g. on a re-run in Spyder), restart the kernel,
# which clears the previously built graph.
tfe.enable_eager_execution()  # enable eager mode: ops run as soon as they are entered
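# A defensive variant (a sketch; tf.executing_eagerly exists in TF >= 1.7):
# if not tf.executing_eagerly():
#     tfe.enable_eager_execution()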

# Define constant tensors
print("Define constant tensors")
a = tf.constant(2)
print("a = %i" % a)
b = tf.constant(3)
print("b = %i" % b)

# Run the operation without the need for tf.Session
print("Running operations, without tf.Session")
c = a + b
print("a + b = %i" % c)
d = a * b
print("a * b = %i" % d)
Example no. 13
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import yaml

def func_cuRadiomics(yaml_addr, image, mask):
    tfe.enable_eager_execution()
    a = tf.constant(value=1)
    _RadiomicsGLCMModule = tf.load_op_library('./build/libRadiomics.so').radiomics

    features_name_glcm = ["Autocorrelation",
                          "JointAverage",
                          "ClusterProminence",
                          "ClusterShade",
                          "ClusterTendency",
                          "Contrast",
                          "Correlation",
                          "DifferenceAverage",
                          "DifferenceEntropy",
                          "DifferenceVariance",
                          "JointEnergy",
                          "JointEntropy",
                          "Imc1",
                          "Imc2",
                          "Idm",
                          "Idmn",
                          "Id",
                          "Idn",
                          "InverseVariance",
                          "MaximumProbability",
                          "SumAverage",
                          "SumEntropy",
                          "SumSquares"
                          ]
    features_name_firstorder = \
        ["Energy",
         "Entropy",
         "Minimum",
         "TenthPercentile",
         "NintiethPercentile",
         "Maximum",
         "Mean",
         "Median",
         "InterquartileRange",
         "Range",
         "MAD",
         "rMAD",
         "RMS",
         "StandardDeviation",
         "Skewness",
         "Kurtosis",
         "Varianc",
         "Uniformity"]


    # Read parameters for feature extraction
    with open(yaml_addr) as f:
        parameters = yaml.safe_load(f)
    #Range = parameters['Range']
    FirstOrder = parameters['FirstOrder']
    GLCM = parameters['GLCM']
    label = parameters['label']

    # arr_shape = normed_arr_img0.shape
    Range = [np.min(image).astype('int'), np.max(image).astype('int')]
    SETTING = np.array([Range[0], Range[1], FirstOrder, GLCM, label])

    #arr_shape = image.shape
    image[np.where(mask != label)] = -1

    arr_features = _RadiomicsGLCMModule(image, SETTING)
    NumOfFeatures = 0
    Names = []
    if GLCM == 1:
        Names = Names + features_name_glcm
        NumOfFeatures += 23
    if FirstOrder == 1:
        Names = Names + features_name_firstorder
        NumOfFeatures += 18

    arr_features = np.reshape(arr_features, newshape=[NumOfFeatures, image.shape[0]])

    RadiomicsFeatures = {}
    for i in range(len(Names)):
        RadiomicsFeatures[Names[i]] = arr_features[i, :]

    return RadiomicsFeatures
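
# Hypothetical usage (a sketch; the YAML path, shapes, and dtype are
# assumptions about the custom CUDA op loaded from libRadiomics.so):
# image = np.random.randint(0, 100, size=(256, 64, 64)).astype('int32')
# mask = np.ones_like(image)  # every voxel labeled 1
# features = func_cuRadiomics('params.yaml', image, mask)
# print(sorted(features.keys()))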