Code example #1
def inference(input_shape, N_CLASSES):
    '''Build the model.
    Args:
        input_shape: shape of one input image, e.g. (width, height, channels=3)
        N_CLASSES: number of output classes
    Returns:
        a Keras Model whose output tensor holds the class probabilities
        (softmax outputs), float, [batch_size, N_CLASSES]
    '''

    inputs = Input(shape=input_shape)

    # All new operations will be in test mode from now on.
    K.set_learning_phase(False)

    ## Conv layer 1
    x = dn_layer(inputs=inputs, name='layer1')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='layer1_maxpool')(x)

    ## Conv layer 2
    x = dn_layer(inputs=x, num_filters=32, name='layer2')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='layer2_maxpool')(x)

    ## Conv layer 3
    x = dn_layer(inputs=x, num_filters=64, name='layer3')
    x = MaxPooling2D((2, 2), strides=(2, 2), name='layer3_maxpool')(x)

    x = GlobalAveragePooling2D(name='GlobalAveragePooling2D')(x)

    #x = Flatten(name='flatten')(x)
    x = Dense(N_CLASSES, activation='softmax', name='predictions')(x)

    model = Model(inputs=inputs, outputs=x)
    return model
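
The helper `dn_layer` is not shown on this page. A minimal sketch of what it might look like, assuming a standard Conv2D -> BatchNormalization -> ReLU block (the default of 16 filters is a guess, chosen so the filter counts double across layers 1-3):

from tensorflow.contrib.keras.python.keras.layers import Conv2D, BatchNormalization, Activation

def dn_layer(inputs, num_filters=16, name='layer'):
    # Hypothetical conv block: 3x3 conv, batch norm, then ReLU.
    x = Conv2D(num_filters, (3, 3), padding='same', name=name + '_conv')(inputs)
    x = BatchNormalization(name=name + '_bn')(x)
    return Activation('relu', name=name + '_relu')(x)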
Code example #2
File: base.py Project: saadmahboob/polyaxon
    def _call_graph_fn(self, features, labels=None):
        """Calls graph function.

        Args:
            features: `Tensor` or `dict` of tensors
            labels: `Tensor` or `dict` of tensors
        """
        set_learning_phase(Modes.is_train(self.mode))

        kwargs = {}
        if 'labels' in get_arguments(self._graph_fn):
            kwargs['labels'] = labels
        return self._graph_fn(mode=self.mode, features=features, **kwargs)
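
`get_arguments` here is a polyaxon utility; a minimal sketch of the behaviour this snippet relies on, assuming it simply returns a callable's parameter names:

import inspect

def get_arguments(func):
    # Sketch: return the positional parameter names of `func`.
    return inspect.getargspec(func).args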
Code example #3
    def build(self, alpha, img_input, temp_softmax, depth_multiplier=1):
        # depth_multiplier: channel multiplier for the depthwise conv blocks
        # (assumed default of 1, as in the standard MobileNet blocks).
        shape = (1, 1, int(1024 * alpha))

        # This looks dangerous: it is not clear how the model would be
        # affected by the learning_phase variable being set to True.
        K.set_learning_phase(True)

        with tf.name_scope('teacher') as scope:

            self.conv1 = Conv2D(
                int(32 * alpha),
                (3, 3),
                padding='same',
                use_bias=False,
                strides=(1, 1),
                name='teacher_conv1', trainable=self.trainable)(img_input)
            self.conv2 = BatchNormalization(axis=-1, name='teacher_conv1_bn', trainable=self.trainable)(self.conv1)
            self.conv3 = Activation(self.relu6, name='teacher_conv1_relu', trainable=self.trainable)(self.conv2)

            self.conv4 = self._depthwise_conv_block(self.conv3, 64, alpha, depth_multiplier, block_id=15)
            self.conv5 = self._depthwise_conv_block(self.conv4, 128, alpha, depth_multiplier, strides=(2, 2), block_id=16)
            self.conv6 = self._depthwise_conv_block(self.conv5, 128, alpha, depth_multiplier, block_id=17)
            self.conv7 = self._depthwise_conv_block(self.conv6, 256, alpha, depth_multiplier, strides=(2, 2), block_id=18)
            self.conv8 = self._depthwise_conv_block(self.conv7, 256, alpha, depth_multiplier, block_id=19)
            self.conv9 = self._depthwise_conv_block(self.conv8, 512, alpha, depth_multiplier, strides=(2, 2), block_id=20)
            self.conv10 = self._depthwise_conv_block(self.conv9, 512, alpha, depth_multiplier, block_id=21)
            self.conv11 = self._depthwise_conv_block(self.conv10, 512, alpha, depth_multiplier, block_id=22)
            self.conv12 = self._depthwise_conv_block(self.conv11, 512, alpha, depth_multiplier, block_id=23)
            self.conv13 = self._depthwise_conv_block(self.conv12, 512, alpha, depth_multiplier, block_id=24)
            self.conv14 = self._depthwise_conv_block(self.conv13, 512, alpha, depth_multiplier, block_id=25)
            self.conv15 = self._depthwise_conv_block(self.conv14, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=26)
            self.conv16 = self._depthwise_conv_block(self.conv15, 1024, alpha, depth_multiplier, block_id=27)

            self.conv17 = GlobalAveragePooling2D()(self.conv16)
            self.conv18 = Reshape(shape, name='teacher_reshape_1', trainable=self.trainable)(self.conv17)

            self.conv19 = Dropout(0.5, name='teacher_dropout', trainable=self.trainable)(self.conv18)
            # Feed the dropout output into the 1x1 prediction conv so the
            # Dropout layer actually takes effect.
            self.conv20 = Conv2D(self.num_classes, (1, 1), padding='same', name='teacher_conv_preds', trainable=self.trainable)(self.conv19)
            self.conv21 = Activation('softmax', name='teacher_act_softmax', trainable=self.trainable)(tf.divide(self.conv20, temp_softmax))
            self.conv22 = Reshape((self.num_classes,), name='teacher_reshape_2', trainable=self.trainable)(self.conv21)

        return self
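
As the in-code warning suggests, a global K.set_learning_phase(True) forces every BatchNormalization and Dropout layer into training behaviour, even if the graph is later used for inference. A safer pattern, sketched here under the Keras 2 layer API (where __call__ accepts a training argument), is to scope the flag per call:

from tensorflow.contrib.keras.python.keras.layers import BatchNormalization, Dropout

def regularized_block(x, is_training):
    # `training=` controls train/test behaviour for this call only,
    # leaving the global learning phase untouched.
    x = BatchNormalization(axis=-1)(x, training=is_training)
    return Dropout(0.5)(x, training=is_training)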
Code example #4
File: base.py Project: saadmahboob/polyaxon
    def _call_graph_fn(self, features, labels=None):
        """Calls graph function.

        Creates first one or two graph, i.e. train and target graphs.
        Return the optimal action given an exploration policy.

        If `is_dueling` is set to `True`,
        then another layer is added that represents the state value.

        Args:
            inputs: `Tensor` or `dict` of tensors
        """
        set_learning_phase(Modes.is_train(self.mode))

        graph_fn = self._build_graph_fn()
        self._graph_results = graph_fn(mode=self.mode,
                                       features=features,
                                       labels=labels)
        return self._build_actions()
Code example #5
File: base.py Project: saadmahboob/polyaxon
    def _call_graph_fn(self, features, labels=None):
        """Calls graph function.

        Creates first one or two graph, i.e. train and target graphs.
        Return the optimal action given an exploration policy.

        If `is_dueling` is set to `True`,
        then another layer is added that represents the state value.

        Args:
            features: `Tensor` or `dict` of tensors
            labels: `Tensor` or `dict` of tensors
        """
        set_learning_phase(Modes.is_train(self.mode))

        graph_fn = self._build_graph_fn()

        if self.use_target_graph:
            # We create 2 graphs: a training graph and a target graph,
            # so that we can copy one graph to another given a frequency.
            self._train_graph = FunctionModule(mode=self.mode,
                                               build_fn=graph_fn,
                                               name='train')
            self._train_results = self._train_graph(features=features,
                                                    labels=labels)
            self._target_graph = FunctionModule(mode=self.mode,
                                                build_fn=graph_fn,
                                                name='target')
            self._target_results = self._target_graph(features=features,
                                                      labels=labels)
            return self._build_actions()
        else:
            self._train_results = graph_fn(mode=self.mode,
                                           features=features,
                                           labels=labels)
            self._target_results = self._train_results
            return self._build_actions()
Code example #6
# https://www.tensorflow.org/extend/estimators
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# numpy
import numpy as np

# tensorflow
import tensorflow as tf

# keras
from tensorflow.contrib.keras.python.keras.layers import Dense, Conv2D, Dropout, MaxPooling2D, Flatten
from tensorflow.contrib.keras.python.keras import backend as K
K.set_learning_phase(1)  #set learning phase

# input data
from tensorflow.examples.tutorials.mnist import input_data

# estimators
from tensorflow.contrib import learn

# estimator "builder"
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib

import time


def time_usage(func):
    def wrapper(*args, **kwargs):
        beg_ts = time.time()
        # (completed from the truncated snippet: time the call and report it)
        result = func(*args, **kwargs)
        end_ts = time.time()
        print("elapsed time: %f" % (end_ts - beg_ts))
        return result
    return wrapper
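
A quick usage sketch of the decorator above:

@time_usage
def dummy_step():
    # stand-in for an expensive call
    return sum(i * i for i in range(10 ** 6))

dummy_step()  # prints the elapsed time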
Code example #7
def load_inference_model(model_path):
    K.set_learning_phase(0)
    return load_model(model_path)
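
The learning phase must be set before load_model so that layers such as BatchNormalization and Dropout are built in test mode. A usage sketch (the checkpoint path is hypothetical):

import numpy as np

model = load_inference_model('checkpoints/model.h5')  # hypothetical path
batch = np.zeros((1,) + model.input_shape[1:], dtype='float32')
preds = model.predict(batch)  # runs with learning phase 0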
Code example #8
if __name__ == '__main__':
    batch_size = 16
    nb_epoch = 400

    optimizer = 'adam'
    log_dir = './keras_logs/simple11'
    old_session = K.get_session()

    X_train, y_train, X_test, y_test = load_data()
    datagen = ImageDataGenerator(rotation_range=15, zoom_range=0.20)
    datagen.fit(X_train)

    with tf.Graph().as_default():
        session = tf.Session('')
        K.set_session(session)
        K.set_learning_phase(1)

        es_cb = callbacks.EarlyStopping(monitor='val_loss',
                                        patience=200,
                                        verbose=0,
                                        mode='auto')
        lr_cb = callbacks.LearningRateScheduler(
            lambda epoch: float(learning_rates[epoch]))

        model = mk_model_with_bn()
        model.summary()

        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
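
The example is truncated after compile; a plausible continuation, sketched here (not the original file's code) using the generator and callbacks defined above:

        tb_cb = callbacks.TensorBoard(log_dir=log_dir)
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                            steps_per_epoch=len(X_train) // batch_size,
                            epochs=nb_epoch,
                            validation_data=(X_test, y_test),
                            callbacks=[es_cb, lr_cb, tb_cb])
    K.set_session(old_session)  # restore the session saved at the top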
Code example #9
    def build(self, rgb, num_classes, temp_softmax, train_mode):
        K.set_learning_phase(True)
        # conv1_1
        with tf.name_scope('mentor_conv1_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(rgb, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[64],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #	out = tf.nn.bias_add(conv, biases)
            #       mean , var = tf.nn.moments(out, axes= [0])
            #      out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv1_1 = tf.nn.relu(conv, name=scope)
            self.conv1_1 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv1_1')(
                                                  self.conv1_1)
            #self.conv1_1 = Dropout((0.4))(self.conv1_1)
            self.parameters += [kernel, biases]

        with tf.name_scope('mentor_conv1_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.conv1_1,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[64],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv1_2 = tf.nn.relu(conv, name=scope)
            self.conv1_2 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv1_2')(
                                                  self.conv1_2)
            self.parameters += [kernel, biases]

        self.pool1 = tf.nn.max_pool(self.conv1_2,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='mentor_pool1')
        with tf.name_scope('mentor_conv2_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.pool1,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[128],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv2_1 = tf.nn.relu(conv, name=scope)
            self.conv2_1 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv2_1')(
                                                  self.conv2_1)
            #self.conv2_1 = Dropout((0.4))(self.conv2_1)
            self.parameters += [kernel, biases]

        with tf.name_scope('mentor_conv2_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.conv2_1,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[128],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv2_2 = tf.nn.relu(conv, name=scope)
            self.conv2_2 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv2_2')(
                                                  self.conv2_2)
            self.parameters += [kernel, biases]

        self.pool2 = tf.nn.max_pool(self.conv2_2,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='mentor_pool2')

        with tf.name_scope('mentor_conv3_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.pool2,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[256],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            # mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv3_1 = tf.nn.relu(conv, name=scope)
            self.conv3_1 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv3_1')(
                                                  self.conv3_1)
            #self.conv3_1 = Dropout((0.4))(self.conv3_1)
            self.parameters += [kernel, biases]

        with tf.name_scope('mentor_conv3_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.conv3_1,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[256],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv3_2 = tf.nn.relu(conv, name=scope)
            self.conv3_2 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv3_2')(
                                                  self.conv3_2)
            #self.conv3_2 = Dropout((0.4))(self.conv3_2)
            self.parameters += [kernel, biases]

        with tf.name_scope('mentor_conv3_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.conv3_2,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[256],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv3_3 = tf.nn.relu(conv, name=scope)
            self.conv3_3 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv3_3')(
                                                  self.conv3_3)
            self.parameters += [kernel, biases]
        self.pool3 = tf.nn.max_pool(self.conv3_3,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='mentor_pool3')

        with tf.name_scope('mentor_conv4_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.pool3,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[512],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv4_1 = tf.nn.relu(conv, name=scope)
            self.conv4_1 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv4_1')(
                                                  self.conv4_1)
            #self.conv4_1 = Dropout((0.4))(self.conv4_1)
            self.parameters += [kernel, biases]

        # conv4_2
        with tf.name_scope('mentor_conv4_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.conv4_1,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[512],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv4_2 = tf.nn.relu(conv, name=scope)
            self.conv4_2 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv4_2')(
                                                  self.conv4_2)
            #self.conv4_2 = Dropout((0.4))(self.conv4_2)
            self.parameters += [kernel, biases]

        with tf.name_scope('mentor_conv4_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.conv4_2,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[512],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv4_3 = tf.nn.relu(conv, name=scope)
            self.conv4_3 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv4_3')(
                                                  self.conv4_3)
            self.parameters += [kernel, biases]
        self.pool4 = tf.nn.max_pool(self.conv4_3,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='mentor_pool4')
        with tf.name_scope('mentor_conv5_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.pool4,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[512],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv5_1 = tf.nn.relu(conv, name=scope)
            self.conv5_1 = BatchNormalization(axis=-1,
                                              name='mentor_bn_conv5_1')(
                                                  self.conv5_1)
            #self.conv5_1 = Dropout((0.4))(self.conv5_1)
            self.parameters += [kernel, biases]
        with tf.name_scope('mentor_conv5_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.conv5_1,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[512],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv5_2 = tf.nn.relu(conv, name=scope)
            self.conv5_2 = BatchNormalization(
                axis=-1, name='mentor_batch_norm_conv5_2')(self.conv5_2)
            #self.conv5_2 = Dropout((0.4))(self.conv5_2)
            self.parameters += [kernel, biases]

        with tf.name_scope('mentor_conv5_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512],
                                                     dtype=tf.float32,
                                                     stddev=1e-2),
                                 trainable=self.trainable,
                                 name='mentor_weights')
            conv = tf.nn.conv2d(self.conv5_2,
                                kernel, [1, 1, 1, 1],
                                padding='SAME')
            biases = tf.Variable(tf.constant(0.0,
                                             shape=[512],
                                             dtype=tf.float32),
                                 trainable=self.trainable,
                                 name='mentor_biases')
            #out = tf.nn.bias_add(conv, biases)
            #mean , var = tf.nn.moments(out, axes= [0])
            #out = (out - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.conv5_3 = tf.nn.relu(conv, name=scope)
            self.conv5_3 = BatchNormalization(
                axis=-1, name='mentor_batch_norm_conv5_3')(self.conv5_3)
            self.parameters += [kernel, biases]

        self.pool5 = tf.nn.max_pool(self.conv5_3,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='mentor_pool5')
        # fc1
        with tf.name_scope('mentor_fc1') as scope:
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                   dtype=tf.float32,
                                                   stddev=1e-2),
                               trainable=self.trainable,
                               name='mentor_weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[4096],
                                           dtype=tf.float32),
                               trainable=self.trainable,
                               name='mentor_biases')
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            #mean , var = tf.nn.moments(fc1l, axes= [0])
            #fc1l = (fc1l - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.fc1 = tf.nn.relu(fc1l)
            self.fc1 = BatchNormalization(axis=-1,
                                          name='mentor_batch_norm_fc1')(
                                              self.fc1)
            # self.fc1 = Dropout((0.4))(self.fc1)
            #self.fc1 = tf.nn.dropout(self.fc1, 0.5)
            self.parameters += [fc1w, fc1b]

        with tf.name_scope('mentor_fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                   dtype=tf.float32,
                                                   stddev=1e-2),
                               trainable=self.trainable,
                               name='mentor_weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4096],
                                           dtype=tf.float32),
                               trainable=self.trainable,
                               name='mentor_biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            #mean , var = tf.nn.moments(fc2l, axes= [0])
            #fc2l = (fc2l - mean)/tf.sqrt(var + tf.Variable(1e-10))
            self.fc2 = tf.nn.relu(fc2l)
            self.fc2 = BatchNormalization(axis=-1,
                                          name='mentor_batch_norm_fc2')(
                                              self.fc2)
            if train_mode:
                self.fc2 = tf.nn.dropout(self.fc2, 0.5)
            self.parameters += [fc2w, fc2b]

        with tf.name_scope('mentor_fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([4096, num_classes],
                                                   dtype=tf.float32,
                                                   stddev=1e-2),
                               trainable=self.trainable,
                               name='mentor_weights')
            fc3b = tf.Variable(tf.constant(1.0,
                                           shape=[num_classes],
                                           dtype=tf.float32),
                               trainable=self.trainable,
                               name='mentor_biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            #self.fc3l = tf.nn.relu(fc3l)
            self.parameters += [fc3w, fc3b]

        self.softmax = tf.nn.softmax(self.fc3l / temp_softmax)

        return self
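
A usage sketch for this builder; the class name Mentor, its constructor, and the input size are assumptions, since only the build method is shown (the class is presumed to initialize self.trainable and self.parameters):

import tensorflow as tf

rgb = tf.placeholder(tf.float32, [None, 224, 224, 3])   # hypothetical input size
mentor = Mentor()                                       # hypothetical class
net = mentor.build(rgb, num_classes=10, temp_softmax=5.0, train_mode=True)
logits, soft_targets = net.fc3l, net.softmax            # raw logits and softened outputs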
Code example #10
File: graph.py Project: saadmahboob/polyaxon
    def from_config(cls, mode, features, labels, config):  # pylint: disable=arguments-differ
        """Instantiates a Graph container from its config (output of `get_config()`).

        Arguments:
            mode: the current run mode; used to set the learning phase.
            features: `Tensor` or `dict` of tensors.
            labels: `Tensor` or `dict` of tensors.
            config: Model config dictionary.

        Returns:
            A model instance.

        Raises:
            ValueError: In case of improperly formatted config dict.
        """
        # set the training mode
        set_learning_phase(Modes.is_train(mode))

        if not isinstance(config, GraphConfig):
            config = GraphConfig.from_dict(config)

        # layer instances created during
        # the graph reconstruction process
        created_layers = {}

        # Create an input layer based on the defined inputs and features
        for layer in config.input_layers:
            layer_name, node_index, tensor_index = cls.get_node_data(layer)
            if layer_name in features:
                created_layers[layer_name] = InputLayer(
                    input_tensor=features[layer_name], name=layer_name)
            elif isinstance(labels, Mapping) and layer_name in labels:
                created_layers[layer_name] = InputLayer(
                    input_tensor=labels[layer_name], name=layer_name)
            else:
                raise ConfigurationError(
                    "Input `{}` is not found".format(layer_name))

        def process_layer(layer):
            """Deserialize a layer, then call it on appropriate inputs.

            Arguments:
                layer: layer config object.

            Raises:
                ValueError: In case of an improperly formatted layer config.
            """
            layer_class = layer.IDENTIFIER
            layer_name = layer.name

            # Instantiate layer.
            if layer_class in LAYERS:
                created_layer = LAYERS[layer_class].from_config(layer)
            elif layer_class in IMAGE_PROCESSORS:
                created_layer = IMAGE_PROCESSORS[layer_class].from_config(
                    layer)
            else:
                raise ValueError(
                    "The layer `{}` is not supported.".format(layer_class))
            created_layers[layer_name] = created_layer

            # Gather layer inputs.
            inbound_nodes_data = layer.inbound_nodes
            input_tensors = []
            for input_data in inbound_nodes_data:
                in_layer_name, in_node_index, in_tensor_index = cls.get_node_data(
                    input_data)
                if len(input_data) == 3:
                    kwargs = {}
                elif len(input_data) == 4:
                    kwargs = input_data[3]
                else:
                    raise ValueError('Improperly formatted model config.')
                if in_layer_name not in created_layers:
                    raise ValueError('Missing layer: ' + in_layer_name)
                inbound_layer = created_layers[in_layer_name]
                inbound_node = inbound_layer.inbound_nodes[in_node_index]
                input_tensors.append(
                    inbound_node.output_tensors[in_tensor_index])
            # Call layer on its inputs, thus creating the node
            # and building the layer if needed.
            if input_tensors:
                if len(input_tensors) == 1:
                    created_layer(input_tensors[0], **kwargs)
                else:
                    created_layer(input_tensors, **kwargs)

        for layer in config.layers:
            process_layer(layer)

        name = config.name
        input_tensors = []
        output_tensors = []
        for layer_data in config.input_layers:
            layer_name, node_index, tensor_index = cls.get_node_data(
                layer_data)
            assert layer_name in created_layers, "Layer `{}` not found".format(
                layer_name)
            layer = created_layers[layer_name]
            layer_output_tensors = layer.inbound_nodes[
                node_index].output_tensors
            input_tensors.append(layer_output_tensors[tensor_index])
        for layer_data in config.output_layers:
            layer_name, node_index, tensor_index = cls.get_node_data(
                layer_data)
            assert layer_name in created_layers
            layer = created_layers[layer_name]
            layer_output_tensors = layer.inbound_nodes[
                node_index].output_tensors
            output_tensors.append(layer_output_tensors[tensor_index])
        return cls(inputs=input_tensors, outputs=output_tensors, name=name)
Code example #11
    def build(self, rgb, num_classes, temp_softmax, seed, train_mode):

        K.set_learning_phase(True)
        # conv1_1
        with tf.name_scope('mentee_conv1_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, self.num_channels, 64], dtype=tf.float32,
                                                     stddev=1e-2, seed=seed),
                                 trainable=self.trainable, name='mentee_weights')
            conv = tf.nn.conv2d(rgb, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                                 trainable=self.trainable, name='mentee_biases')
            out = tf.nn.bias_add(conv, biases)
            #out = self.extra_regularization(out)

            self.conv1_1 = tf.nn.relu(out, name=scope)
            #self.conv1_1 = BatchNormalization(axis=-1, name='mentee_bn_conv1_1')(self.conv1_1)
            self.parameters += [kernel, biases]

        self.pool1 = tf.nn.max_pool(self.conv1_1,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool1')
        # conv2_1
        with tf.name_scope('mentee_conv2_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
                                                     stddev=1e-2, seed=seed),
                                 trainable=self.trainable, name='mentee_weights')
            conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
                                 trainable=self.trainable, name='mentee_biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv2_1 = tf.nn.relu(out, name=scope)
            #self.conv2_1 = BatchNormalization(axis=-1, name='mentee_bn_conv2_1')(self.conv2_1)
            self.parameters += [kernel, biases]

        self.pool2 = tf.nn.max_pool(self.conv2_1,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool2')
        with tf.name_scope('mentee_conv3_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,
                                                     stddev=1e-2, seed=seed),
                                 trainable=self.trainable, name='mentee_weights')
            conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                                 trainable=self.trainable, name='mentee_biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv3_1 = tf.nn.relu(out, name=scope)
            #self.conv3_1 = BatchNormalization(axis=-1, name='mentee_bn_conv3_1')(self.conv3_1)
            self.parameters += [kernel, biases]

        self.pool3 = tf.nn.max_pool(self.conv3_1,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool3')

        with tf.name_scope('mentee_conv4_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,
                                                     stddev=1e-2, seed=seed),
                                 trainable=self.trainable, name='mentee_weights')
            conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=self.trainable, name='mentee_biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv4_1 = tf.nn.relu(out, name=scope)
            #self.conv4_1 = BatchNormalization(axis=-1, name='mentee_bn_conv4_1')(self.conv4_1)
            self.parameters += [kernel, biases]

        self.pool4 = tf.nn.max_pool(self.conv4_1,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool4')
        with tf.name_scope('mentee_conv5_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-2, seed=seed),
                                 trainable=self.trainable, name='mentee_weights')
            conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=self.trainable, name='mentee_biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv5_1 = tf.nn.relu(out, name=scope)
            #self.conv5_1 = BatchNormalization(axis=-1, name='mentee_bn_conv5_1')(self.conv5_1)
            self.parameters += [kernel, biases]

        self.pool5 = tf.nn.max_pool(self.conv5_1,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool5')
        with tf.name_scope('mentee_conv6_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
                                                     stddev=1e-2, seed=seed),
                                 trainable=self.trainable, name='mentee_weights')
            conv = tf.nn.conv2d(self.pool5, kernel, [1, 1, 1, 1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
                                 trainable=self.trainable, name='mentee_biases')
            out = tf.nn.bias_add(conv, biases)
            self.conv6_1 = tf.nn.relu(out, name=scope)
            #self.conv6_1 = BatchNormalization(axis=-1, name='mentee_bn_conv6_1')(self.conv6_1)
            self.parameters += [kernel, biases]

        self.pool6 = tf.nn.max_pool(self.conv6_1,
                                    ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME',
                                    name='pool6')

        # fc1
        with tf.name_scope('mentee_fc1') as scope:
            shape = int(np.prod(self.pool6.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                   dtype=tf.float32, stddev=1e-2, seed=seed),
                               trainable=self.trainable, name='mentee_weights')
            fc1b = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32),
                               trainable=self.trainable, name='mentee_biases')
            pool6_flat = tf.reshape(self.pool6, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool6_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            #self.fc1 = BatchNormalization(axis=-1, name='mentee_bn_fc1')(self.fc1)
            #if train_mode == True:
            #    print("train_mode is true")
            #    self.fc1 = tf.nn.dropout(self.fc1, 0.5, seed=seed)
            self.parameters += [fc1w, fc1b]

        with tf.name_scope('mentee_fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                   dtype=tf.float32, stddev=1e-2, seed=seed),
                               trainable=self.trainable, name='mentee_weights')
            fc2b = tf.Variable(tf.constant(0.0, shape=[4096], dtype=tf.float32),
                               trainable=self.trainable, name='mentee_biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            #self.fc2 = BatchNormalization(axis=-1, name='mentee_bn_fc2')(self.fc2)

            # Dropout and BatchNormalization would regularize the network and
            # help it generalize better. However, to demonstrate the
            # effectiveness of knowledge transfer, no other regularizers are added.
            #if train_mode == True:
            #    self.fc2 = tf.nn.dropout(self.fc2, 0.5, seed=seed)
            self.parameters += [fc2w, fc2b]

        with tf.name_scope('mentee_fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([4096, num_classes],
                                                   dtype=tf.float32, stddev=1e-2, seed=seed),
                               trainable=self.trainable, name='mentee_weights')
            fc3b = tf.Variable(tf.constant(0.0, shape=[num_classes], dtype=tf.float32),
                               trainable=self.trainable, name='mentee_biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            #self.fc3 = tf.nn.relu(fc3l)
            self.parameters += [fc3w, fc3b]

        self.softmax = tf.nn.softmax(self.fc3l / temp_softmax)
        return self
Code example #12
import os
import cv2

os.chdir(r'D:/drivers/')

from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers.core import Dense, Dropout, Flatten
from tensorflow.contrib.keras.python.keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from tensorflow.contrib.keras.python.keras.layers.pooling import AveragePooling2D
from tensorflow.contrib.keras.python.keras.layers import InputLayer
from tensorflow.contrib.keras.python.keras.models import load_model
from tensorflow.contrib.keras.python.keras import optimizers
from tensorflow.contrib.keras.python.keras.callbacks import EarlyStopping
from tensorflow.contrib.keras.python.keras.callbacks import ModelCheckpoint
from tensorflow.contrib.keras.python.keras.callbacks import History
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.layers.core import Lambda
import tensorflow as tf
K.set_learning_phase(0)


def load_image(filepath, size):
    """Args:
        filepath: path to the image file
        size: target width/height in pixels

    Uses the OpenCV package to read and resize the image into numerical form.

    Returns: numpy array of shape (size, size, 3)
    """
    image = cv2.imread(filepath)
    res = cv2.resize(image, (size, size), interpolation=cv2.INTER_LINEAR)
    return res
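
A quick usage sketch (the file path is hypothetical):

img = load_image('train/c0/img_100.jpg', 224)  # relative to D:/drivers
print(img.shape)  # (224, 224, 3)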


def load_train_data(size, fp='D:\\drivers\\train'):
Code example #13
def cnn_sentiment_model(inputs,
                        nb_words,
                        embedding_dim=300,
                        static_embedding=True,
                        embedding_weights=None,
                        filter_hs=None,
                        nb_filters=100,
                        emb_size=100,
                        hidden_dropout=0.2,
                        is_training=True,
                        augmentation_function=None,
                        l2_weight=1e-4,
                        img_shape=None,
                        new_shape=None,
                        image_summary=False,
                        batch_norm_decay=0.99,
                        seed=0,
                        embedding_dropout=0.2):
    from tensorflow.contrib.keras.python.keras.layers import Embedding, Input, Convolution1D, MaxPooling1D, Flatten, \
        Dense, Dropout, Activation
    from tensorflow.contrib.keras.python.keras.initializers import glorot_uniform
    from tensorflow.contrib.keras.python.keras.layers.merge import Concatenate

    from tensorflow.contrib.keras.python.keras import backend as K
    K.set_learning_phase(1 if is_training else 0)

    sequence_length = img_shape[0]

    if filter_hs is None:
        filter_hs = [3, 4, 5]

    model = inputs

    def ci(shape, dtype=None, partition_info=None):
        assert shape[0] == embedding_weights.shape[0] and shape[
            1] == embedding_weights.shape[
                1], 'Shapes are not equal required={} init value={}'.format(
                    shape, embedding_weights.shape)
        return embedding_weights

    model = Embedding(nb_words,
                      embedding_dim,
                      input_length=sequence_length,
                      trainable=(not static_embedding),
                      embeddings_initializer='uniform'
                      if embedding_weights is None else ci)(model)
    if embedding_dropout > 0.0:
        model = Dropout(embedding_dropout, seed=seed)(model,
                                                      training=is_training)

    convs = list()
    for fsz in filter_hs:
        conv = Convolution1D(
            filters=nb_filters,
            kernel_size=fsz,
            padding='valid',
            activation='relu',
            kernel_initializer=glorot_uniform(seed=seed))(model)
        pool = MaxPooling1D(pool_size=sequence_length - fsz + 1)(conv)
        flatten = Flatten()(pool)
        convs.append(flatten)

    # Concatenate requires at least two inputs; fall back to the single
    # feature map when only one filter size is given.
    if len(convs) > 1:
        graph_out = Concatenate()(convs)
    else:
        graph_out = convs[0]

    model = graph_out

    model = Dense(emb_size,
                  kernel_initializer=glorot_uniform(seed=seed))(model)
    model = Dropout(hidden_dropout, seed=seed)(model, training=is_training)
    model = Activation('relu')(model)

    return model
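
A usage sketch; the placeholder shape and vocabulary size are hypothetical:

import tensorflow as tf

sequence_length = 50  # hypothetical
inputs = tf.placeholder(tf.int32, [None, sequence_length])
features = cnn_sentiment_model(inputs,
                               nb_words=10000,  # hypothetical vocabulary size
                               img_shape=(sequence_length,),
                               is_training=True)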