Example #1
    def __init__(self, data_format):
        """Creates a model for classifying a hand-written digit.

    Args:
      data_format: Either 'channels_first' or 'channels_last'.
    """
        super(Mnist, self).__init__()
        if data_format == "channels_first":
            self._input_shape = [-1, 1, 28, 28]
        else:
            assert data_format == "channels_last"
            self._input_shape = [-1, 28, 28, 1]

        self.conv1 = tf_layers.Conv2D(32,
                                      5,
                                      padding="same",
                                      data_format=data_format,
                                      activation=nn.relu)
        self.conv2 = tf_layers.Conv2D(64,
                                      5,
                                      padding="same",
                                      data_format=data_format,
                                      activation=nn.relu)
        self.fc1 = tf_layers.Dense(1024, activation=nn.relu)
        self.fc2 = tf_layers.Dense(10)
        self.dropout = tf_layers.Dropout(0.4)
        self.max_pool2d = tf_layers.MaxPooling2D((2, 2), (2, 2),
                                                 padding="same",
                                                 data_format=data_format)
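The constructor above only wires the layers together; a forward pass still has to chain them. A minimal sketch of a matching call method, assuming the usual conv/pool/conv/pool/dense ordering for this MNIST architecture and that tensorflow is imported as tf (none of this is shown in the original):

    def call(self, inputs, training=False):
        # Assumed forward pass for the layers defined in __init__.
        x = tf.reshape(inputs, self._input_shape)
        x = self.conv1(x)
        x = self.max_pool2d(x)
        x = self.conv2(x)
        x = self.max_pool2d(x)
        x = tf.reshape(x, [-1, 7 * 7 * 64])  # 28x28 halved twice by pooling, 64 filters
        x = self.fc1(x)
        x = self.dropout(x, training=training)  # dropout only active while training
        return self.fc2(x)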
Example #2
    def __init__(self, data_format):
        """Creates a model for classifying a hand-written digit.

        Args:
          data_format: Either "channels_first" or "channels_last".
            "channels_first" is typically faster on GPUs while "channels_last" is
            typically faster on CPUs. See
            https://www.tensorflow.org/performance/performance_guide#data_formats
        """
        super(Model, self).__init__()
        # Match the reshape target to the chosen data_format; the original
        # hard-coded the channels_last shape, which breaks channels_first input.
        if data_format == "channels_first":
            self._input_shape = [-1, 1, 28, 28]
        else:
            assert data_format == "channels_last"
            self._input_shape = [-1, 28, 28, 1]

        self.conv1 = layers.Conv2D(32,
                                   5,
                                   padding="same",
                                   data_format=data_format,
                                   activation=nn.relu)
        self.conv2 = layers.Conv2D(64,
                                   5,
                                   padding="same",
                                   data_format=data_format,
                                   activation=nn.relu)
        self.fc1 = layers.Dense(1024, activation=nn.relu)
        self.fc2 = layers.Dense(10)
        self.dropout = layers.Dropout(0.4)
        self.max_pool2d = layers.MaxPooling2D((2, 2), (2, 2),
                                              padding="same",
                                              data_format=data_format)
Example #3
 def body(_, i):
   # One training step: draw a batch from the iterator, compute the loss,
   # and run one optimizer update before returning the new loop state.
   i += 1
   x, yt = it.get_next()
   dense = layers.Dense(nclass)
   y = dense(x)
   loss = losses.sparse_softmax_cross_entropy(yt, y)
   opt = adam.AdamOptimizer()
   train_op = opt.minimize(loss, var_list=dense.trainable_weights)
   with ops.control_dependencies([train_op]):
     loss = array_ops.identity(loss)  # returned loss now depends on the update
   return loss, i
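The key idiom in body is the control_dependencies block: returning array_ops.identity(loss) forces train_op to run whenever the returned loss is evaluated. The same pattern in isolation, as a sketch assuming TF1-style graph mode (tf.compat.v1 in TF2) with an existing loss, optimizer opt, and session sess:

 train_op = opt.minimize(loss)
 with ops.control_dependencies([train_op]):
   loss = array_ops.identity(loss)
 sess.run(loss)  # each evaluation of loss now also applies one optimizer update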
Example #4
def build_gru_model2():
    # Stacked recurrent layers with dropout (adds a second recurrent layer).
    # float_data is assumed to be defined elsewhere (the input feature array).
    model = Sequential()
    model.add(
        layers.GRU(32,
                   dropout=0.1,
                   recurrent_dropout=0.5,
                   return_sequences=True,
                   input_shape=(None, float_data.shape[-1])))
    model.add(
        layers.GRU(64, activation='relu', dropout=0.1, recurrent_dropout=0.5))
    model.add(layers.Dense(1))
    return model
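To train it, the returned model still needs to be compiled and fit. A sketch under common settings for this kind of sequence-regression model; train_gen, val_gen, and val_steps are assumed to be defined elsewhere, and older Keras versions would use fit_generator instead of fit:

model = build_gru_model2()
model.compile(optimizer='rmsprop', loss='mae')
history = model.fit(train_gen,
                    steps_per_epoch=500,
                    epochs=40,
                    validation_data=val_gen,
                    validation_steps=val_steps)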
Example #5
def create_fc_per_eg_grad(batch_size, activation_size, num_layers):
  # Build per-example gradients of a small MLP's l2 loss in two ways:
  # vectorized with pfor, and sequentially with for_loop (for comparison).
  inp = random_ops.random_normal([batch_size, activation_size])
  layers = [
      tf_layers.Dense(activation_size, activation=nn.relu)
      for _ in range(num_layers)
  ]
  projection = tf_layers.Dense(1)

  def model_fn(activation):
    for layer in layers:
      activation = layer(activation)
    activation = projection(activation)
    activation = nn.l2_loss(activation)
    return gradient_ops.gradients(activation, variables.trainable_variables())

  def loop_fn(i):
    # Run the model on example i alone, restoring a leading batch dim of 1.
    return model_fn(array_ops.expand_dims(array_ops.gather(inp, i), 0))

  pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
  loop_fn_dtypes = [x.dtype for x in variables.trainable_variables()]
  while_outputs = control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, batch_size)
  return pfor_outputs, while_outputs
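Both return values hold the same per-example gradients: pfor computes them vectorized across the batch, while for_loop runs the model once per example. A sketch of evaluating the two for comparison, assuming TF1 graph mode and a session import (from tensorflow.python.client import session):

pfor_grads, loop_grads = create_fc_per_eg_grad(
    batch_size=8, activation_size=16, num_layers=2)
with session.Session() as sess:
  sess.run(variables.global_variables_initializer())
  sess.run([pfor_grads, loop_grads])  # the two gradient lists should match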
Example #6
 def __init__(self, activation_size, num_layers):
     self._layers = [
         tf_layers.Dense(activation_size, activation=nn.relu)
         for _ in range(num_layers)
     ]
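The fragment only builds the layer list; a forward method would chain them the same way model_fn does in Example #5. A minimal sketch (the method name is an assumption):

 def __call__(self, activation):
     # Apply the stacked Dense layers in order, each feeding the next.
     for layer in self._layers:
         activation = layer(activation)
     return activation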
Example #7
# -*- coding: utf-8 -*- 
# @Time 2020/5/16 18:17
# @Author wcy

import random
import gym
import numpy as np
from tensorflow.python.keras import models
from tensorflow.python.keras import layers  # keras layers; the original imported tensorflow.python.layers, which lacks the Keras-style API used below

env = gym.make("CartPole-v0")  # load the game environment

STATE_DIM, ACTION_DIM = 4, 2  # state dimension 4, action dimension 2
model = models.Sequential([
    layers.Dense(64, input_dim=STATE_DIM, activation='relu'),
    layers.Dense(20, activation='relu'),
    layers.Dense(ACTION_DIM, activation='linear')
])
model.summary()  # print a summary of the network


def generate_data_one_episode():
    '''Generates training data from a single episode of play.'''
    x, y, score = [], [], 0
    state = env.reset()
    while True:
        action = random.randrange(0, 2)
        x.append(state)
        y.append([1, 0] if action == 0 else [0, 1])  # record the action one-hot
        state, reward, done, _ = env.step(action)  # take the action
        score += reward
        if done:  # episode finished
            break
    return x, y, score
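With the episode generator complete, a training set can be built by sampling many random episodes and keeping only the higher-scoring ones. A sketch of that filtering-and-fitting step; the episode count and score threshold here are assumptions:

x_train, y_train = [], []
for _ in range(10000):
    x, y, score = generate_data_one_episode()
    if score > 50:  # keep only comparatively good random episodes
        x_train += x
        y_train += y
model.compile(loss='mse', optimizer='adam')
model.fit(np.array(x_train), np.array(y_train), epochs=5)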
Example #8
 def __init__(self, num_actions: int, num_hidden_units: int):
     super(ActorCritic, self).__init__()
     self.common = layers.Dense(num_hidden_units, activation="relu")
     self.actor = layers.Dense(num_actions)
     self.critic = layers.Dense(1)
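A matching forward pass pushes the input through the shared layer and returns both heads, following the standard actor-critic pattern. A minimal sketch of the call method (not shown in the original):

 def call(self, inputs):
     x = self.common(inputs)
     return self.actor(x), self.critic(x)  # (action logits, value estimate)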