Code Example #1
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D

import matplotlib.pyplot as plt
import numpy as np

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

import warnings
warnings.filterwarnings("ignore")


# cost function
def cost_func(x=None, y=None):
    '''Cost function.
    For visualizing the contour plot, call cost_func() with no arguments and
    feed the returned placeholder nodes for fast GPU evaluation. To attach
    variables being optimized, pass them in as the x and y arguments.
    Args:
        x: None to create a placeholder as input; pass a tensor to use it as the x input.
        y: None to create a placeholder as input; pass a tensor to use it as the y input.
    Returns:
        Tuple (x, y, z) where x and y are the input tensors and z is the output tensor.
    '''
    if x is None:
        x = tf.placeholder(tf.float32, shape=[None, 1])
    if y is None:
        y = tf.placeholder(tf.float32, shape=[None, 1])

    # the surface has two local minima near (0, 0)
    z = __f1(x, y)  # __f1 is a helper defined elsewhere in the source file; see the sketch below
    return x, y, z
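
The helper __f1 is not included in this snippet. A minimal stand-in, assuming only what the comment states (a surface with two local minima near the origin); the original helper may differ:

# Hypothetical stand-in for the missing __f1: a slightly tilted double-well
# surface whose two local minima lie near (0, 0)
def __f1(x, y):
    return x ** 4 - x ** 2 + 0.25 * x + y ** 2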
Code Example #2
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    np.random.seed(FLAGS.seed)
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)
    tf1.disable_v2_behavior()

    session = tf1.Session()
    with session.as_default():
        x_train, y_train, x_test, y_test = utils.load(FLAGS.dataset, session)
        n_train = x_train.shape[0]

        num_classes = int(np.amax(y_train)) + 1
        model = lenet5(n_train, x_train.shape[1:], num_classes)
        for l in model.layers:
            l.kl_cost_weight = l.add_weight(
                name='kl_cost_weight',
                shape=(),
                initializer=tf.constant_initializer(0.),
                trainable=False)
            l.kl_cost_bias = l.add_weight(
                name='kl_cost_bias',
                shape=(),
                initializer=tf.constant_initializer(0.),
                trainable=False)

        [negative_log_likelihood, accuracy, log_likelihood, kl,
         elbo] = get_losses_and_metrics(model, n_train)
        metrics = [elbo, log_likelihood, kl, accuracy]
        tensorboard = tf1.keras.callbacks.TensorBoard(
            log_dir=FLAGS.output_dir,
            update_freq=FLAGS.batch_size * FLAGS.validation_freq)

        def fit_fn(model, steps, initial_epoch):
            return model.fit(
                x=x_train,
                y=y_train,
                batch_size=FLAGS.batch_size,
                epochs=initial_epoch + (FLAGS.batch_size * steps) // n_train,
                initial_epoch=initial_epoch,
                validation_data=(x_test, y_test),
                validation_freq=max(
                    (FLAGS.validation_freq * FLAGS.batch_size) // n_train, 1),
                verbose=1,
                callbacks=[tensorboard])

        model.compile(
            optimizer=tf.keras.optimizers.Adam(lr=float(FLAGS.learning_rate)),
            loss=negative_log_likelihood,
            metrics=metrics)
        session.run(tf1.global_variables_initializer())

        train_epochs = (FLAGS.training_steps * FLAGS.batch_size) // n_train
        fit_fn(model, FLAGS.training_steps, initial_epoch=0)

        labels = tf.keras.layers.Input(shape=y_train.shape[1:])
        ll = tf.keras.backend.function([model.input, labels], [
            model.output.distribution.log_prob(tf.squeeze(labels)),
            model.output.distribution.logits
        ])

        base_metrics = [
            utils.ensemble_metrics(x_train, y_train, model, ll, n_samples=10),
            utils.ensemble_metrics(x_test, y_test, model, ll, n_samples=10)
        ]
        model_dir = os.path.join(FLAGS.output_dir, 'models')
        tf.io.gfile.makedirs(model_dir)
        base_model_filename = os.path.join(model_dir, 'base_model.weights')
        model.save_weights(base_model_filename)

        # Train base model further for comparison.
        fit_fn(model,
               FLAGS.n_auxiliary_variables *
               FLAGS.auxiliary_sampling_frequency * FLAGS.ensemble_size,
               initial_epoch=train_epochs)

        overtrained_metrics = [
            utils.ensemble_metrics(x_train, y_train, model, ll, n_samples=10),
            utils.ensemble_metrics(x_test, y_test, model, ll, n_samples=10)
        ]

        # Perform refined VI.
        sample_op = []
        for l in model.layers:
            if isinstance(l, (tfp.layers.DenseLocalReparameterization,
                              tfp.layers.Convolution2DFlipout)):
                weight_op, weight_cost = sample_auxiliary_op(
                    l.kernel_prior.distribution,
                    l.kernel_posterior.distribution,
                    FLAGS.auxiliary_variance_ratio)
                sample_op.append(weight_op)
                sample_op.append(l.kl_cost_weight.assign_add(weight_cost))
                # Fix the variance of the prior
                session.run(l.kernel_prior.distribution.istrainable.assign(0.))
                if hasattr(l.bias_prior, 'distribution'):
                    bias_op, bias_cost = sample_auxiliary_op(
                        l.bias_prior.distribution,
                        l.bias_posterior.distribution,
                        FLAGS.auxiliary_variance_ratio)
                    sample_op.append(bias_op)
                    sample_op.append(l.kl_cost_bias.assign_add(bias_cost))
                    # Fix the variance of the prior
                    session.run(
                        l.bias_prior.distribution.istrainable.assign(0.))

        ensemble_filenames = []
        for i in range(FLAGS.ensemble_size):
            model.load_weights(base_model_filename)
            for j in range(FLAGS.n_auxiliary_variables):
                session.run(sample_op)
                model.compile(
                    optimizer=tf.keras.optimizers.Adam(
                        # The learning rate is proportional to the scale of the prior.
                        lr=float(FLAGS.learning_rate_for_sampling *
                                 np.sqrt(1. -
                                         FLAGS.auxiliary_variance_ratio)**j)),
                    loss=negative_log_likelihood,
                    metrics=metrics)
                fit_fn(model,
                       FLAGS.auxiliary_sampling_frequency,
                       initial_epoch=train_epochs)
            ensemble_filename = os.path.join(
                model_dir, 'ensemble_component_' + str(i) + '.weights')
            ensemble_filenames.append(ensemble_filename)
            model.save_weights(ensemble_filename)

        auxiliary_metrics = [
            utils.ensemble_metrics(x_train,
                                   y_train,
                                   model,
                                   ll,
                                   weight_files=ensemble_filenames,
                                   n_samples=10),
            utils.ensemble_metrics(x_test,
                                   y_test,
                                   model,
                                   ll,
                                   weight_files=ensemble_filenames,
                                   n_samples=10)
        ]

        for metrics, name in [(base_metrics, 'Base model'),
                              (overtrained_metrics, 'Overtrained model'),
                              (auxiliary_metrics, 'Auxiliary sampling')]:
            logging.info(name)
            for metrics_dict, split in [(metrics[0], 'train'),
                                        (metrics[1], 'test')]:
                logging.info(split)
                for metric_name in metrics_dict:
                    logging.info('%s: %s', metric_name,
                                 metrics_dict[metric_name])
Code Example #3
def console_entry_point():
    tf.disable_v2_behavior()
    tf.app.run(main)
Code Example #4
File: dl_utils.py  Project: sniafas/ML-Projects
import os
import tensorflow.compat.v1 as tf
from numba import cuda

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # Suppress TF log messages
tf.disable_v2_behavior()  # Fall back to TF1 behavior; a lot changed in TF2


def gpu_release():
    '''
    Release GPU memory by resetting the current CUDA device.
    '''

    device = cuda.get_current_device()
    device.reset()


def gpu_session():
    '''
    Create a GPU session with memory growth enabled.
    :return: tf session
    '''
    config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
                            log_device_placement=False,
                            allow_soft_placement=True)
    sess = tf.Session(config=config)

    return sess
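
A short usage sketch (assumed, not part of the source file): create the session, run work inside it, then free the device.

# Hypothetical usage of the helpers above
sess = gpu_session()
with sess.as_default():
    pass  # build and run the TF1 graph here
sess.close()
gpu_release()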
Code Example #5
def neural_network(num_iter=1000):
    tf.disable_v2_behavior()

    # Reset the graph
    tf.reset_default_graph()

    # Setting a seed
    tf.set_random_seed(4155)
    # Construct each possible point pair (x,t) to feed the neural network
    Nx = 10
    Nt = 10
    x = np.linspace(0, 1, Nx)  #from 0 to 1 (sin function)
    t = np.linspace(0, 1, Nt)

    X, T = np.meshgrid(x, t)

    x_ = (X.ravel()).reshape(-1, 1)
    t_ = (T.ravel()).reshape(-1, 1)

    x_tf = tf.convert_to_tensor(x_)
    t_tf = tf.convert_to_tensor(t_)  #converts x and t to tensors

    points = tf.concat([x_tf, t_tf], 1)  # concatenate into one (Nx*Nt, 2) input tensor

    num_hidden_neurons = [20, 20]
    num_hidden_layers = np.size(num_hidden_neurons)

    with tf.variable_scope('nn'):  #DeepNeuralNetwork
        # Input layer
        previous_layer = points

        # Hidden layers
        for l in range(num_hidden_layers):
            current_layer = tf.layers.dense(previous_layer,
                                            num_hidden_neurons[l],
                                            activation=tf.nn.sigmoid)
            previous_layer = current_layer

        # Output layer
        nn_output = tf.layers.dense(previous_layer, 1)
        #Dense implements the operation:
        #output = activation(dot(input, kernel) + bias)

    # Set up the cost function
    def u(x):
        return tf.sin(np.pi * x)  # initial condition: u(x, 0) = sin(pi*x)

    #Trial solution
    with tf.name_scope('cost'):
        trial = (1 - t_tf) * u(x_tf) + x_tf * (1 - x_tf) * t_tf * nn_output

        trial_dt = tf.gradients(trial, t_tf)
        trial_d2x = tf.gradients(tf.gradients(trial, x_tf), x_tf)

        err = tf.square(trial_dt[0] - trial_d2x[0])
        cost = tf.reduce_sum(err, name='cost')

    # Define how the neural network should be trained
    learning_rate = 0.001
    with tf.name_scope('train'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        training_op = optimizer.minimize(cost)
        # Adam is an optimization algorithm that can be used instead of classical
        # stochastic gradient descent to update network weights iteratively
        # based on training data. GradientDescentOptimizer would also work.

    # Reference variable to the output from the network
    u_nn = None

    # Define a node that initializes all the nodes within the computational graph
    # for TensorFlow to evaluate
    init = tf.global_variables_initializer()

    with tf.Session() as sess:  # context for running TensorFlow operations
        # Initialize the computational graph
        init.run()

        #print('Initial cost: %g'%cost.eval())

        for i in range(num_iter):
            sess.run(training_op)

        #print('Final cost: %g'%cost.eval())

        u_nn = trial.eval()

    u_e = np.exp(-np.pi**2 * t_) * np.sin(
        np.pi * x_)  #exact/analytical solution

    U_nn = u_nn.reshape(
        (Nt, Nx)).T  # the first and the last are 0 - what does that mean?
    U_e = u_e.reshape((Nt, Nx)).T

    return U_nn, U_e, x
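
A short driver sketch (assumed) that calls the function above and reports the deviation from the analytical solution:

# Hypothetical usage: solve the 1D diffusion equation and check the error
U_nn, U_e, x = neural_network(num_iter=1000)
print('max abs error vs analytical solution:', np.abs(U_nn - U_e).max())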
Code Example #6
File: deep_reg.py  Project: arnaupujol/nnsc
"""
DEEP REG

This module defines the methods to make a regression from a deep neural network.

:Author: Arnau Pujol <*****@*****.**>

:Version: 1.0.1
"""


import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # enable TensorFlow 1.x behavior
import scipy.io as sio
from copy import deepcopy as dp

def leaky_relu(z, name=None):
  """
  This method applies a Leaky ReLU to the value z.

  Parameters:
  -----------
  z: float
    Value to operate on.
  name: str
    A name for the operation (optional).

  Returns:
  --------
  float
    The Leaky ReLU of z.
  """
  # Body completed here (the snippet is truncated); the conventional 0.01
  # negative slope is an assumption.
  return tf.maximum(0.01 * z, z, name=name)
Code Example #7
'''
Q3) Using the iris.csv data file, build a linear regression model.
     [Condition 1] x variables: columns 2 and 3, y variable: column 1
     [Condition 2] 7:3 split (train/test set)
         train set: build the model, test set: evaluate the model
     [Condition 3] learning_rate=0.01
     [Condition 4] 1,000 training iterations
     [Condition 5] model evaluation - print MSE
'''

import pandas as pd
import tensorflow.compat.v1 as tf  # ver 1.x
tf.disable_v2_behavior()  # do not use ver 2.x behavior
from sklearn.metrics import mean_squared_error  # model evaluation
from sklearn.preprocessing import minmax_scale  # normalization
from sklearn.model_selection import train_test_split  # train/test split

iris = pd.read_csv('C:/ITWILL/6_Tensorflow/data/iris.csv')
print(iris.info())
cols = list(iris.columns)
iris_df = iris[cols[:3]]

# 1. x data, y data
x_data = iris_df[cols[1:3]]  # x: columns 2 and 3
y_data = iris_df[cols[0]]  # y: column 1

# 2. normalize x to the 0~1 range
x_data = minmax_scale(x_data)

# 3. build the train/test data sets
x_train, x_test, y_train, y_test = train_test_split(
    x_data, y_data, test_size=0.3)  # 7:3 split (condition 2)
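
The snippet breaks off after the split. A minimal sketch of the remaining steps implied by conditions 3-5 (learning_rate=0.01, 1,000 iterations, MSE evaluation); every variable name and the optimizer choice below are assumptions, not from the source:

# Sketch of the truncated remainder (names and optimizer choice are assumptions)
X = tf.placeholder(tf.float32, shape=[None, 2])
Y = tf.placeholder(tf.float32, shape=[None])
w = tf.Variable(tf.random_normal([2, 1]))
b = tf.Variable(tf.random_normal([1]))

model_output = tf.squeeze(tf.matmul(X, w) + b)
loss = tf.reduce_mean(tf.square(model_output - Y))  # MSE loss
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)  # condition 3

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):  # condition 4: 1,000 iterations
        sess.run(train_op, feed_dict={X: x_train, Y: y_train})
    y_pred = sess.run(model_output, feed_dict={X: x_test})

print('MSE =', mean_squared_error(y_test, y_pred))  # condition 5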
Code Example #8
def main(_):
  tf.disable_v2_behavior()
  tf.enable_resource_variables()

  if FLAGS.hparams is None:
    hparams = hparams_flags.hparams_from_flags()
  else:
    hparams = hparams_lib.HParams(FLAGS.hparams)

  cluster = None
  if FLAGS.use_tpu and FLAGS.master is None:
    if FLAGS.tpu_name:
      cluster = tf.distribute.cluster_resolver.TPUClusterResolver(
          FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    else:
      cluster = tf.distribute.cluster_resolver.TPUClusterResolver()
      tf.config.experimental_connect_to_cluster(cluster)
      tf.tpu.experimental.initialize_tpu_system(cluster)

  session_config = tf.ConfigProto()
  # Workaround for https://github.com/tensorflow/tensorflow/issues/26411 where
  # convolutions (used in blurring) get confused about data-format when used
  # inside a tf.data pipeline that is run on GPU.
  if (tf.test.is_built_with_cuda() and
      not hparams.input_data.preprocessing.defer_blurring):
    # RewriterConfig.OFF = 2
    session_config.graph_options.rewrite_options.layout_optimizer = 2
  run_config = tf_estimator.tpu.RunConfig(
      master=FLAGS.master,
      cluster=cluster,
      model_dir=FLAGS.model_dir,
      save_checkpoints_steps=FLAGS.save_interval_steps,
      keep_checkpoint_max=FLAGS.max_checkpoints_to_keep,
      keep_checkpoint_every_n_hours=(FLAGS.keep_checkpoint_interval_secs /
                                     (60.0 * 60.0)),
      log_step_count_steps=100,
      session_config=session_config,
      tpu_config=tf_estimator.tpu.TPUConfig(
          iterations_per_loop=FLAGS.steps_per_loop,
          per_host_input_for_training=True,
          experimental_host_call_every_n_steps=FLAGS.summary_interval_steps,
          tpu_job_name='train_tpu_worker' if FLAGS.mode == 'train' else None,
          eval_training_input_configuration=(
              tf_estimator.tpu.InputPipelineConfig.SLICED if FLAGS.use_tpu else
              tf_estimator.tpu.InputPipelineConfig.PER_HOST_V1)))
  params = {
      'hparams': hparams,
      'use_tpu': FLAGS.use_tpu,
      'data_dir': FLAGS.data_dir,
  }
  estimator = tf_estimator.tpu.TPUEstimator(
      model_fn=model_fn,
      use_tpu=FLAGS.use_tpu,
      config=run_config,
      params=params,
      train_batch_size=hparams.bs,
      eval_batch_size=hparams.eval.batch_size)

  if hparams.input_data.input_fn not in dir(inputs):
    raise ValueError(f'Unknown input_fn: {hparams.input_data.input_fn}')
  input_fn = getattr(inputs, hparams.input_data.input_fn)

  training_set_size = inputs.get_num_train_images(hparams)
  steps_per_epoch = training_set_size / hparams.bs
  stage_1_epochs = hparams.stage_1.training.train_epochs
  stage_2_epochs = hparams.stage_2.training.train_epochs
  total_steps = int((stage_1_epochs + stage_2_epochs) * steps_per_epoch)

  num_eval_examples = inputs.get_num_eval_images(hparams)
  eval_steps = num_eval_examples // hparams.eval.batch_size

  if FLAGS.mode == 'eval':
    for ckpt_str in tf.train.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.eval_interval_secs,
        timeout=60 * 60):
      result = estimator.evaluate(
          input_fn=input_fn, checkpoint_path=ckpt_str, steps=eval_steps)
      estimator.export_saved_model(
          os.path.join(FLAGS.model_dir, 'exports'),
          lambda: input_fn(tf_estimator.ModeKeys.PREDICT, params),
          checkpoint_path=ckpt_str)
      if result['global_step'] >= total_steps:
        return
  else:  # 'train' or 'train_then_eval'.
    estimator.train(input_fn=input_fn, max_steps=total_steps)
    if FLAGS.mode == 'train_then_eval':
      result = estimator.evaluate(input_fn=input_fn, steps=eval_steps)
      estimator.export_saved_model(
          os.path.join(FLAGS.model_dir, 'exports'),
          lambda: input_fn(tf_estimator.ModeKeys.PREDICT, params))
Code Example #9
import tensorflow as tf
from tensorflow.compat.v1 import disable_eager_execution, disable_control_flow_v2, disable_v2_behavior, disable_tensor_equality

disable_tensor_equality()
disable_eager_execution()
disable_control_flow_v2()
disable_v2_behavior()  # switches off all TF2 behaviors, subsuming the three calls above
Code Example #10
# https://deeplearningcourses.com/c/advanced-computer-vision
# https://www.udemy.com/advanced-computer-vision
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future

import tensorflow.compat.v1 as tf
import numpy as np
import tensorflow.compat.v1.keras as keras
import tensorflow.compat.v1.keras.backend as K

tf.disable_v2_behavior()  # this code was written for TF1


def custom_softmax(x):
    # subtract the row max for numerical stability; keepdims=True keeps the
    # reduced axis so the broadcasts work for any batch size
    m = tf.reduce_max(x, 1, keepdims=True)
    x = x - m
    e = tf.exp(x)
    return e / tf.reduce_sum(e, -1, keepdims=True)


a = np.random.randn(1, 1000)

tfy = tf.nn.softmax(a)
ky = keras.activations.softmax(K.variable(a))
tfc = custom_softmax(a)

session = K.get_session()

tfy_ = session.run(tfy)
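
The example is cut off after the first evaluation. A plausible continuation (assumed) runs the remaining two graphs and checks that all three softmax variants agree:

# Hypothetical continuation of the truncated snippet
ky_ = session.run(ky)
tfc_ = session.run(tfc)
print(np.allclose(tfy_, ky_), np.allclose(tfy_, tfc_))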
Code Example #11
import os
import sys
import warnings

import cython
import keras
import skimage
import skimage.io
import tensorflow.compat.v1 as tensorflow
from bird_image_classification_mva.logger.logging import get_logger
from keras.engine import saving
from pycocotools.coco import COCO
from tqdm import tqdm

logger = get_logger(__name__)

tensorflow.disable_v2_behavior()
ROOT_DIR = os.path.abspath("./")
warnings.filterwarnings("ignore")
# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
import mrcnn.model as modellib
from mrcnn import utils, visualize
from mrcnn.config import Config

# Import COCO config
sys.path.append(os.path.join(ROOT_DIR,
                             "samples/coco/"))  # To find local version
import coco

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
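
A usage sketch for the imports above, following the standard Mask R-CNN inference pattern (the config values and the continuation are assumptions; the original file is truncated here):

# Hypothetical continuation: build a Mask R-CNN model in inference mode
class InferenceConfig(coco.CocoConfig):
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

model = modellib.MaskRCNN(mode="inference", config=InferenceConfig(),
                          model_dir=MODEL_DIR)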
Code Example #12
File: run.py  Project: peternara/simclr-self-training
    if FLAGS.mode == 'eval':
        for ckpt in tf.train.checkpoints_iterator(run_config.model_dir,
                                                  min_interval_secs=15):
            try:
                result = perform_evaluation(estimator=estimator,
                                            input_fn=data_lib.build_input_fn(
                                                builder, False),
                                            eval_steps=eval_steps,
                                            model=model,
                                            num_classes=num_classes,
                                            checkpoint_path=ckpt)
            except tf.errors.NotFoundError:
                continue
            if result['global_step'] >= train_steps:
                return
    else:
        estimator.train(data_lib.build_input_fn(builder, True),
                        max_steps=train_steps)
        if FLAGS.mode == 'train_then_eval':
            perform_evaluation(estimator=estimator,
                               input_fn=data_lib.build_input_fn(
                                   builder, False),
                               eval_steps=eval_steps,
                               model=model,
                               num_classes=num_classes)


if __name__ == '__main__':
    tf.disable_v2_behavior()  # Disable eager mode when running with TF2.
    app.run(main)
Code Example #13
def console_entry_point():
    tf.disable_v2_behavior()
    tf.logging.set_verbosity(tf.logging.INFO)
    app.run(main)
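
A minimal usage sketch (assumed): wire the entry point up as the script's main guard.

# Hypothetical usage of the entry point above
if __name__ == '__main__':
    console_entry_point()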
Code Example #14
'''
Created on Dec 18, 2018
Tensorflow Implementation of the Baseline Model, BPRMF, in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang ([email protected])
'''
# import tensorflow as tf    # Changed by GTL
import tensorflow.compat.v1 as tf  # Changed by GTL
tf.disable_v2_behavior()  # Changed by GTL

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


class BPRMF(object):
    def __init__(self, data_config, pretrain_data, args):
        self.model_type = 'mf'
        self.pretrain_data = pretrain_data

        self.n_users = data_config['n_users']
        self.n_items = data_config['n_items']

        self.lr = args.lr

        self.emb_dim = args.embed_size
        self.batch_size = args.batch_size

        self.regs = eval(args.regs)  # parse the regularization weights from their string form, e.g. '[1e-5,1e-5]'

        self.verbose = args.verbose