Example #1
    def __init__(self, s_date, n_frame):
        self.n_epoch = 20
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
        self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 23*n_frame], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
            self.n_epoch = 10
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
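A minimal sketch of how this estimator might then be fit and saved (not part of the original class; X and Y are assumed to be an [N, 23*n_frame] feature matrix and a length-N target vector):

    def train(self, X, Y):
        # Hypothetical helper: fit the regressor and persist it under model_dir.
        self.estimators.fit(X, Y, n_epoch=self.n_epoch,
                            validation_set=0.1, show_metric=True)
        self.estimators.save('%s/model.tfl' % self.model_dir)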
Example #2
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)
    n = 2
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides = 2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(yTest[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                     loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model2_resnet',
                max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)

    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")

    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
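The convert2/convert3 helpers used above are not shown; assuming they reduce the softmax output and the one-hot yTest to class indices, an equivalent formulation with numpy would be:

import numpy as np
from sklearn import metrics

pred_idx = np.argmax(pred, axis=1)   # softmax probabilities -> class index
true_idx = np.argmax(yTest, axis=1)  # one-hot labels -> class index
print(metrics.confusion_matrix(true_idx, pred_idx))
print(metrics.classification_report(true_idx, pred_idx))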
Example #3
    def run(self):

        # Real-time pre-processing of the image data
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()

        # Resnet model below:  Adapted from tflearn website
        self.n = 5 #32 layer resnet

        # Building Residual Network
        net = tflearn.input_data(shape=[None, 48, 48, 1], data_preprocessing=img_prep, data_augmentation=img_aug)
        net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, regularizer='L2', weight_decay=0.0001)
        net = tflearn.residual_block(net, self.n, 16)
        net = tflearn.residual_block(net, 1, 32, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 32)
        net = tflearn.residual_block(net, 1, 64, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 64)
        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)

        # Regression
        net = tflearn.fully_connected(net, 7, activation='softmax')
        mom = tflearn.Momentum(learning_rate=0.1, lr_decay=0.0001, decay_step=32000, staircase=True, momentum=0.9)
        net = tflearn.regression(net, optimizer=mom,
                                 loss='categorical_crossentropy')

        self.model = tflearn.DNN(net, checkpoint_path='models/model_resnet_emotion',
                            max_checkpoints=10, tensorboard_verbose=0,
                            clip_gradients=0.)

        self.model.load('model.tfl')

        face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        cap = cv2.VideoCapture(0)

        #Main Loop where we will be capturing live webcam feed, crop image and process the image for emotion recognition on trained model
        while True:
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                self.image_processing(roi_gray, img)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
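The image_processing method called in the loop is not shown. A plausible sketch (an assumption, not the original implementation) would resize the grayscale face crop to the 48x48 input the network expects, classify it, and draw the result (numpy assumed imported as np):

    def image_processing(self, roi_gray, img):
        # Hypothetical helper: prepare the face crop and classify its emotion.
        face = cv2.resize(roi_gray, (48, 48)).reshape(1, 48, 48, 1)
        probs = self.model.predict(face)      # softmax over 7 emotion classes
        label = int(np.argmax(probs[0]))
        cv2.putText(img, str(label), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)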
Example #4
    def __init__(self):
        self.len_past = 30
        #self.s_date = "20120101_20160330"
        #self.model_dir = '../model/tflearn/reg_l3_bn/big/%s/' % self.s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.05)
        input_layer = tflearn.input_data(shape=[None, 690], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        self.qty = {}
        self.day_last = {}
        self.currency = 100000000
Example #5
def generator(x, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        x = tflearn.fully_connected(x, n_units=7 * 7 * 128)
        x = tflearn.batch_normalization(x)
        x = tf.nn.tanh(x)
        x = tf.reshape(x, shape=[-1, 7, 7, 128])
        x = tflearn.upsample_2d(x, 2)
        x = tflearn.conv_2d(x, 64, 5, activation='tanh')
        x = tflearn.upsample_2d(x, 2)
        x = tflearn.conv_2d(x, 1, 5, activation='sigmoid')
        return x
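A short sketch of wiring this generator to a noise input (the latent size and layer name here are illustrative, not from the source):

z_dim = 100                                       # assumed latent size
gen_input = tflearn.input_data(shape=[None, z_dim], name='gen_noise')
gen_sample = generator(gen_input)                 # 28x28x1 images in [0, 1]
gen_sample2 = generator(gen_input, reuse=True)    # reuses the same variables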
Example #6
def decoder(inputs):
    net = tflearn.fully_connected(inputs, 1200 * 32, name='DecFC1')
    net = tflearn.batch_normalization(net, name='DecBN1')
    net = tflearn.elu(net)
    print "========================"
    print "dec-L1",net.get_shape()
    print "========================"

    net = tflearn.reshape(net, (-1, side1 // 2**2, side2 // 2**2, 32))
    net = tflearn.conv_2d(net, 32, 3, name='DecConv1')
    net = tflearn.batch_normalization(net, name='DecBN2')
    net = tflearn.elu(net)
    print "========================"
    print "dec-L2",net.get_shape()
    print "========================"

    net = tflearn.conv_2d_transpose(net, 16, 3, [side1 // 2, side2 // 2],
                                        strides=2, padding='same', name='DecConvT1')
    net = tflearn.batch_normalization(net, name='DecBN3')
    net = tflearn.elu(net)
    print "========================"
    print "dec-L3",net.get_shape()
    print "========================"

    net = tflearn.conv_2d(net, 16, 3, name='DecConv2')
    net = tflearn.batch_normalization(net, name='DecBN4')
    net = tflearn.elu(net)
    print "========================"
    print "dec-L4",net.get_shape()
    print "========================"

    net = tflearn.conv_2d_transpose(net, channel, 3, [side1, side2],
                                        strides=2, padding='same', activation='sigmoid',
                                        name='DecConvT2')
    decode_layer = net
    print "========================"
    print "output layer",net.get_shape()
    print "========================"

    return [net,decode_layer]
Example #7
def encoder(inputs,hidden_layer):
    net = tflearn.conv_2d(inputs, 16, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print "========================"
    print "enc-L1",net.get_shape()
    print "========================"

    net = tflearn.conv_2d(net, 16, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print "========================"
    print "enc-L2",net.get_shape()
    print "========================"

    net = tflearn.conv_2d(net, 32, 3, strides=2)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print "========================"
    print "enc-L3",net.get_shape()
    print "========================"
    net = tflearn.conv_2d(net, 32, 3, strides=1)
    net = tflearn.batch_normalization(net)
    net = tflearn.elu(net)
    print "========================"
    print "enc-L4",net.get_shape()
    print "========================"
    net = tflearn.flatten(net)
    #net = tflearn.fully_connected(net, nb_feature,activation="sigmoid")
    net = tflearn.fully_connected(net, nb_feature)
    hidden_layer = net
    net = tflearn.batch_normalization(net)
    net = tflearn.sigmoid(net)
    print "========================"
    print "hidden",net.get_shape()
    print "========================"

    return [net,hidden_layer]
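A minimal sketch of assembling these two pieces into a trainable autoencoder, assuming the module-level side1, side2, channel and nb_feature used above are defined:

inputs = tflearn.input_data(shape=[None, side1, side2, channel])
code, hidden = encoder(inputs, None)
recon, _ = decoder(code)
net = tflearn.regression(recon, optimizer='adam', loss='mean_square',
                         learning_rate=0.001)
autoencoder = tflearn.DNN(net)
# autoencoder.fit(X, X, n_epoch=10, batch_size=64)  # reconstruct the input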
Example #8
def resnet1(x, n=5):
    net = tflearn.conv_2d(x, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, 10, activation='softmax')

    return net
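To train this network it still needs an input layer and a regression head; a brief sketch (the 32x32x3 input shape is an assumption, e.g. CIFAR-10):

x_in = tflearn.input_data(shape=[None, 32, 32, 3])
net = resnet1(x_in, n=5)
net = tflearn.regression(net, optimizer='adam',
                         loss='categorical_crossentropy', learning_rate=0.001)
model = tflearn.DNN(net, tensorboard_verbose=0)
# model.fit(X, Y, n_epoch=10, batch_size=128, show_metric=True)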
Example #9
def resnet(inputs, prob_fc, prob_conv, wd, training_phase=True):
    n = 5
    net = tflearn.conv_2d(inputs, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, 10, activation='linear')
    return net
Example #10
    def conv1d_bn(self, input_tensor, size=1, dim=128, activation='tanh'):
        with tf.variable_scope('conv1d_bn' + str(self.conv1d_idx)):
            shape = input_tensor.get_shape().as_list()
            channels = shape[-1]
            net = tflearn.conv_1d(input_tensor,
                                  dim, [size, channels],
                                  strides=1,
                                  bias=False,
                                  padding='same',
                                  activation=activation)
            net = tflearn.batch_normalization(net)

            self.conv1d_idx += 1
            return net
Example #11
    def __init__(self, s_date, n_frame):
        self.n_epoch = 20
        prev_bd = int(s_date[:6]) - 1
        prev_ed = int(s_date[9:15]) - 1
        if prev_bd % 100 == 0: prev_bd -= 98
        if prev_ed % 100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
        self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 23 * n_frame],
                                         name='input')
        dense1 = tflearn.fully_connected(input_layer,
                                         400,
                                         name='dense1',
                                         activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n,
                                         100,
                                         name='dense2',
                                         activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output,
                                        optimizer='adam',
                                        loss='mean_square',
                                        metric='R2',
                                        learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
            self.n_epoch = 10
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
Example #12
def get_model(model_name):
    # First we load the network
    print("Setting up neural networks...")
    n = 18

    # Real-time data preprocessing
    print("Doing preprocessing...")
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(
        per_channel=True, mean=[0.573364, 0.44924123, 0.39455055])

    # Real-time data augmentation
    print("Building augmentation...")
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_crop([32, 32], padding=4)

    #Build the model (for 32 x 32)
    print("Shaping input data...")
    net = tflearn.input_data(shape=[None, 32, 32, 3],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)

    print("Carving Resnext blocks...")
    net = tflearn.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 64, 32)

    print("Erroding Gradient...")
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, 8, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net,
                             optimizer=opt,
                             loss='categorical_crossentropy')

    print("Structuring model...")
    model = tflearn.DNN(net, tensorboard_verbose=0, clip_gradients=0.)

    # Load the model from checkpoint
    print("Loading the model...")
    model.load(model_name)

    return model
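Once loaded, the model can be used directly for inference on 32x32 RGB inputs; a small usage sketch (the checkpoint path and images are stand-ins):

import numpy as np
model = get_model('checkpoints/resnext.tfl')   # hypothetical checkpoint path
batch = np.random.rand(4, 32, 32, 3)           # stand-in images
probs = model.predict(batch)                   # 8-way softmax per image
labels = np.argmax(probs, axis=1)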
Example #13
    def create_actor_network(self, state_dim, action_dim):
        inputs = input_data(shape=state_dim)
        net = conv_1d(inputs, 128, 2, 2)
        net = max_pool_1d(net, 2, 2, 'same')
        net = batch_normalization(net)

        net = conv_1d(net, 256, 2, 2)
        net = max_pool_1d(net, 2, 2, 'same')
        net = batch_normalization(net)

        shape = net.get_shape().as_list()
        net = fully_connected(net, 1024, activation='relu', regularizer='L2')
        net = dropout(net, 0.8)
        net = fully_connected(net, 1024, activation='relu', regularizer='L2')

        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = initializations.uniform(minval=-0.003, maxval=0.003)
        out = fully_connected(net,
                              action_dim,
                              activation='softmax',
                              weights_init=w_init)
        # Scale output to -action_bound to action_bound
        scaled_out = tf.multiply(out, self.action_bound)
        return inputs, out, scaled_out
Example #14
def resn_unit(incoming,
              nb,
              growth,
              weight_init='variance_scaling',
              weight_decay=0.0001,
              name='dens_unit'):
    rens = incoming
    with tf.variable_scope(name):
        for i in range(nb):
            conn = rens
            bn1 = batch_normalization(rens, name='bn1')
            relu1 = tf.nn.relu(bn1, name='relu1')
            conv1 = conv_2d(relu1,
                            growth,
                            3,
                            weights_init=weight_init,
                            weight_decay=weight_decay,
                            name='conv1')
            bn2 = batch_normalization(conv1, name='bn2')
            relu2 = tf.nn.relu(bn2, name='relu2')
            conv2 = conv_2d(relu2,
                            growth,
                            3,
                            weights_init=weight_init,
                            weight_decay=weight_decay,
                            name='conv2')
            conn_bn = batch_normalization(conn, name='conn_bn')
            conn_relu = tf.nn.relu(conn_bn, name='conn_relu')
            conn_conv = conv_2d(conn_relu,
                                growth,
                                1,
                                weights_init=weight_init,
                                weight_decay=weight_decay,
                                name='conn_conv')
            rens = tf.add(conv2, conn_conv, name='rens')
        return rens
Example #15
def split(incoming, order, strides=1):

    out_channel = incoming.get_shape().as_list()[-1]
    incoming = tflearn.conv_2d(incoming,
                               out_channel,
                               filter_size=3,
                               strides=strides,
                               name='Conv2D_split_' + order,
                               bias=False,
                               regularizer='L2',
                               weight_decay=0.0001)
    incoming = tflearn.batch_normalization(incoming, name='BN_split_' + order)
    incoming = tf.nn.relu(incoming)

    return incoming
Example #16
    def create_critic_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        bn_inputs = tflearn.batch_normalization(inputs, trainable=True)
        action = tflearn.input_data(shape=[None, self.a_dim])
        bn_action = tflearn.batch_normalization(action, trainable=True)
        concat = lambda x: tf.concat([x[0], x[1]], axis=1)

        w_state_layer = tflearn.initializations.xavier(seed=RANDOM_SEED)
        state_layer = tflearn.fully_connected(bn_inputs,
                                              critic_hidden_layer1_size,
                                              activation='linear',
                                              weights_init=w_state_layer)
        concat_layer = concat([state_layer, bn_action])
        concat_layer = ResDense(
            concat_layer, (critic_hidden_layer1_size + self.a_dim),
            int((critic_hidden_layer1_size + self.a_dim) / 2))
        # Weights are init to Uniform[-3e-9, 3e-9]
        w_init_out = tflearn.initializations.uniform(minval=-3e-9,
                                                     maxval=3e-9,
                                                     seed=RANDOM_SEED)
        out = tflearn.fully_connected(concat_layer,
                                      REWARD_SPACE_DIM,
                                      weights_init=w_init_out)
        return inputs, action, out
Example #17
    def _construct_decoder_model(self):
        input_noise = tflearn.input_data(shape=[None, self.latent_dim],
                                         name='input_noise')
        for i, d in enumerate(reversed(self.hidden_dim)):
            if i == 0:
                decoder = tflearn.fully_connected(input_noise,
                                                  d,
                                                  activation=self.decoder_fn,
                                                  scope='decoder_layer_%d' %
                                                  (i + 1),
                                                  reuse=True)
                decoder = tflearn.batch_normalization(decoder,
                                                      scope='decoder_bn_%d' %
                                                      (i + 1),
                                                      reuse=True)
                decoder = tflearn.dropout(decoder, self.dropout_rate)
            else:
                decoder = tflearn.fully_connected(decoder,
                                                  d,
                                                  activation=self.decoder_fn,
                                                  scope='decoder_layer_%d' %
                                                  (i + 1),
                                                  reuse=True)
                decoder = tflearn.batch_normalization(decoder,
                                                      scope='decoder_bn_%d' %
                                                      (i + 1),
                                                      reuse=True)
                decoder = tflearn.dropout(decoder, self.dropout_rate)

        decoder = tflearn.fully_connected(decoder,
                                          self.input_dim,
                                          activation=self.squashing_fn,
                                          scope='decoder_output',
                                          reuse=True)
        self.decoder_model = tflearn.DNN(decoder,
                                         session=self.training_model.session)
Example #18
def BReG_NeXt(_X):
    """BReG_NeXt implementation. Returns feature map before softmax.
  """
    net = tflearn.conv_2d(_X, 32, 3, regularizer='L2', weight_decay=0.0001)
    net = residual_block(net, 7, 32, activation='elu')
    net = residual_block(net, 1, 64, downsample=True, activation='elu')
    net = residual_block(net, 8, 64, activation='elu')
    net = residual_block(net, 1, 128, downsample=True, activation='elu')
    net = residual_block(net, 7, 128, activation='elu')
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'elu')
    net = tflearn.global_avg_pool(net)
    # Regression
    logits = tflearn.fully_connected(net, n_classes, activation='linear')
    return logits
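The function returns linear logits, so a loss has to be attached separately; one way to do that with plain TensorFlow (a sketch under assumed input/label placeholders, not code from the original project):

images = tf.placeholder(tf.float32, [None, 32, 32, 3])   # assumed input shape
labels = tf.placeholder(tf.float32, [None, n_classes])
logits = BReG_NeXt(images)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)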
Example #19
File: RCAE.py Project: kiminh/AMAD
    def decoder(self, inputs, decode_layer):
        net = tflearn.fully_connected(inputs, self.hidden_dim, name='DecFC1')
        net = tflearn.batch_normalization(net, name='DecBN1')
        net = tflearn.elu(net)
        print("========================")
        print("dec-L1", net.get_shape())
        print("========================")

        net = tflearn.reshape(net, (-1, 1, 1, self.hidden_dim))
        net = tflearn.conv_2d(net, 32, 3, name='DecConv1')
        net = tflearn.batch_normalization(net, name='DecBN2')
        net = tflearn.elu(net)
        print("========================")
        print("dec-L2", net.get_shape())
        print("========================")
        net = tflearn.conv_2d_transpose(net, 16, 3, [1, self.hidden_dim],
                                        strides=2, padding='same', name='DecConvT1')
        net = tflearn.batch_normalization(net, name='DecBN3')
        net = tflearn.elu(net)
        print("========================")
        print("dec-L3", net.get_shape())
        print("========================")
        net = tflearn.conv_2d(net, 16, 3, name='DecConv2')
        net = tflearn.batch_normalization(net, name='DecBN4')
        net = tflearn.elu(net)
        print("========================")
        print("dec-L4", net.get_shape())
        print("========================")
        net = tflearn.conv_2d_transpose(net, 1, 3, [1, self.hidden_dim],
                                        strides=2, padding='same', activation='sigmoid',
                                        name='DecConvT2')
        decode_layer = net
        print("========================")
        print("output layer", net.get_shape())
        print("========================")
        return [net, decode_layer]
Example #20
    def generator(self, x, reuse=False):
        s = self.img_size
        s2 = self.divide(s, 2)

        s4 = self.divide(s2, 2)
        s8 = self.divide(s4, 2)
        s16 = self.divide(s8, 2)

        with tf.variable_scope('Generator', reuse=reuse):
            x = tflearn.fully_connected(x, s16 * s16 * 512)
            x = tf.reshape(x, shape=[-1, s16, s16, 512])
            x = tflearn.dropout(x, 0.8)
            x = tflearn.batch_normalization(x)
            x = tflearn.conv_2d_transpose(x, 128, 5, [s8, s8], strides=[2, 2], activation='relu')
            self.noise_layer(x, 0.2)
            x = tflearn.batch_normalization(x)
            x = tflearn.conv_2d_transpose(x, 64, 5, [s4, s4], strides=[2, 2], activation='relu')
            self.noise_layer(x, 0.2)
            x = tflearn.batch_normalization(x)
            x = tflearn.conv_2d_transpose(x, 32, 5, [s2, s2], strides=[2, 2], activation='relu')
            self.noise_layer(x, 0.2)
            x = tflearn.batch_normalization(x)
            x = tflearn.conv_2d_transpose(x, 1, 2, [s, s], strides=[2, 2], activation='relu')
            return tf.nn.tanh(x)
Example #21
def resnet(dataset, img_prep, img_aug, X, Y, testX, testY, width, height,
           channel, class_num, filt, depth, epoch):
    # Building Residual Network
    layer = 1
    net = tflearn.input_data(shape=[None, width, height, channel],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net, filt, 3, regularizer='L2', weight_decay=0.0001)
    while (depth != 0):
        d_num = depth % 100
        depth = depth // 100
        if (layer == 1):
            net = tflearn.residual_block(net, d_num, filt)
        else:
            net = tflearn.residual_block(net, 1, filt, downsample=True)
            net = tflearn.residual_block(net, d_num - 1, filt)
        layer = layer + 1
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, class_num, activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net,
                             optimizer=mom,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net,
                        checkpoint_path=('model_resnet_' + dataset),
                        max_checkpoints=10,
                        tensorboard_verbose=0,
                        clip_gradients=0.)
    model.fit(X,
              Y,
              n_epoch=epoch,
              validation_set=(testX, testY),
              snapshot_epoch=False,
              snapshot_step=500,
              show_metric=True,
              batch_size=128,
              shuffle=True,
              run_id=('resnet_' + dataset))
    aa = model.predict(testX)
    correct = 0
    for i in range(len(aa)):
        if (aa[i].index(max(aa[i])) == np.argmax(testY[i])):
            correct = correct + 1
    return correct / len(aa)
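The accuracy loop above can also be written in vectorized form with numpy (np is already used in this function), assuming predict returns per-class probabilities:

aa = np.asarray(model.predict(testX))
correct = np.sum(np.argmax(aa, axis=1) == np.argmax(testY, axis=1))
accuracy = correct / len(aa)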
Example #22
def inference(x):
    n = 5
    net = tflearn.conv_2d(x, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, 5, activation=None)
    logits = net
    net = tflearn.activations.softmax(net)
    return logits, net
Example #23
def build_tflearn_convnet_1():
    network = input_data(shape=[None, 48, 48, 1])
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = batch_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.75)
    network = fully_connected(network, 7, activation='softmax')
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    return tflearn.DNN(network, tensorboard_verbose=2)
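A usage sketch for this builder, with random stand-in arrays in place of a real 48x48 grayscale dataset:

import numpy as np
model = build_tflearn_convnet_1()
X = np.random.rand(32, 48, 48, 1)            # stand-in images
Y = np.eye(7)[np.random.randint(0, 7, 32)]   # stand-in one-hot labels
model.fit(X, Y, n_epoch=1, batch_size=16, show_metric=True)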
Example #24
def build_model(learning_rate=0.00001):
    tf.reset_default_graph()
    net1 = tflearn.input_data([None, shag])
    net1 = tflearn.batch_normalization(net1)
    net1 = tflearn.fully_connected(net1, k1, regularizer='L2')
    net1 = tflearn.dropout(net1, 0.8)
    net1 = tflearn.fully_connected(net1, k2, regularizer='L2')
    net1 = tflearn.dropout(net1, 0.8)
    net1 = tflearn.fully_connected(net1, len_kyrs, activation='softmax')
    net1 = tflearn.regression(net1,
                              optimizer='adam',
                              learning_rate=learning_rate,
                              loss='binary_crossentropy')

    model = tflearn.DNN(net1)
    return model
Example #25
    def model_emotion(self):
        # Real-time pre-processing of the image data
        img_prep = ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Real-time data augmentation
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()
        # img_aug.add_random_crop([48, 48], padding=8)

        # Building Residual Network
        net = tflearn.input_data(shape=[None, 48, 48, 1],
                                 data_preprocessing=img_prep,
                                 data_augmentation=img_aug)
        net = tflearn.conv_2d(net,
                              nb_filter=16,
                              filter_size=3,
                              regularizer='L2',
                              weight_decay=0.0001)
        net = tflearn.residual_block(net, self.n, 16)
        net = tflearn.residual_block(net, 1, 32, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 32)
        net = tflearn.residual_block(net, 1, 64, downsample=True)
        net = tflearn.residual_block(net, self.n - 1, 64)
        net = tflearn.batch_normalization(net)
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)

        # Regression
        net = tflearn.fully_connected(net, 7, activation='softmax')
        mom = tflearn.Momentum(learning_rate=0.1,
                               lr_decay=0.0001,
                               decay_step=32000,
                               staircase=True,
                               momentum=0.9)
        net = tflearn.regression(net,
                                 optimizer=mom,
                                 loss='categorical_crossentropy')

        self.model = tflearn.DNN(net,
                                 checkpoint_path='models/model_resnet_emotion',
                                 max_checkpoints=10,
                                 tensorboard_verbose=0,
                                 clip_gradients=0.)

        self.model.load('New_models/model_resnet_emotion-33500')
Example #26
def _model2():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    net = input_data(shape=[None, inputSize, inputSize, dim],
                 name='input',
                 data_preprocessing=img_prep,
                 data_augmentation=img_aug)
    n = 3
    j = 64
    '''
    net = tflearn.conv_2d(net, j, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    '''
    net = tflearn.conv_2d(net, j, 7, strides = 2, regularizer='L2', weight_decay=0.0001)
    net = max_pool_2d(net, 2, strides=2)
    net = tflearn.residual_block(net, n, j)
    net = tflearn.residual_block(net, 1, j*2, downsample=True)
    net = tflearn.residual_block(net, n-1, j*2)
    net = tflearn.residual_block(net, 1, j*4, downsample=True)
    net = tflearn.residual_block(net, n-1, j*4)
    net = tflearn.residual_block(net, 1, j*8, downsample=True)
    net = tflearn.residual_block(net, n-1, j*8)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, len(Y[0]), activation='softmax')
    mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=mom,
                     loss='categorical_crossentropy')
    model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
                max_checkpoints=10, tensorboard_verbose=3, clip_gradients=0.)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),snapshot_epoch=False,
              snapshot_step=500, show_metric=True, batch_size=batchNum, shuffle=True, run_id= _id + 'artClassification')
    if modelStore: model.save(_id + '-model.tflearn')
Example #27
def architecture03(input, num_class):
    net = tflearn.conv_2d(input, 64, 3, activation='relu', bias=False)
    net = tflearn.residual_bottleneck(net, 3, 16, 64)
    net = tflearn.residual_bottleneck(net, 1, 32, 128, downsample=False)
    net = tflearn.residual_bottleneck(net, 2, 32, 128)
    net = tflearn.residual_bottleneck(net, 1, 64, 256, downsample=False)
    net = tflearn.residual_bottleneck(net, 2, 64, 256)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, num_class, activation='softmax')

    net = tflearn.regression(net,
                             optimizer='momentum',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    return tflearn.DNN(net, tensorboard_verbose=0)
Example #28
    def _build_network(self, n, image_size):
        # Define the input to the network.

        img_prep = tflearn.ImagePreprocessing()
        img_prep.add_featurewise_zero_center(per_channel=True)

        # Real-time data augmentation.
        img_aug = tflearn.ImageAugmentation()
        img_aug.add_random_flip_leftright()

        net = tflearn.input_data(shape=[None, image_size[0], image_size[1], 3],
                                 data_preprocessing=img_prep,
                                 data_augmentation=img_aug)

        # Start with a normal convolutional layer.
        net = tflearn.conv_2d(net,
                              64,
                              3,
                              regularizer='L2',
                              weight_decay=0.0001)

        # Since this is a ResNet with <50 layers, we'll use regular residual blocks;
        # otherwise, we'd use residual bottleneck blocks instead.
        net = tflearn.residual_block(net, n, 64)
        net = tflearn.residual_block(net, 1, 128, downsample=True)
        net = tflearn.residual_block(net, n - 1, 128)
        net = tflearn.residual_block(net, 1, 256, downsample=True)
        net = tflearn.residual_block(net, n - 1, 256)

        # Perform batch normalization.
        net = tflearn.batch_normalization(net)

        # Activation at the end of the network pre-FC.
        net = tflearn.activation(net, 'relu')
        net = tflearn.global_avg_pool(net)

        net = tflearn.fully_connected(net, 2, activation='softmax')
        mom = tflearn.Momentum(0.1,
                               lr_decay=0.1,
                               decay_step=32000,
                               staircase=True)
        net = tflearn.regression(net,
                                 optimizer=mom,
                                 loss='categorical_crossentropy')

        return net
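The returned graph still has to be wrapped in a DNN before training; a brief sketch of a hypothetical companion method (names and checkpoint path are illustrative, not from the source):

    def _build_model(self, n=5, image_size=(64, 64)):
        # Hypothetical wrapper: turn the graph into a trainable tflearn DNN.
        net = self._build_network(n, image_size)
        return tflearn.DNN(net, checkpoint_path='models/resnet',
                           max_checkpoints=5, tensorboard_verbose=0)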
Example #29
    def build_graph(self, input, expected, reuse, batch_size):

        input = tf.reshape(
            input,
            [-1, self.input_height, self.input_width, self.input_channels])

        input = tf.cast(input, tf.float32)

        network = input / 255.0

        images = network[0:20]
        images = tf.reshape(images, [
            self.input_frames, self.input_height, self.input_width,
            self.input_channels
        ])

        with tf.variable_scope('mainCNN', reuse=reuse):
            network = self.vggLayer(network, 'mainCNN', reuse)
            network = tf.reduce_mean(network, [1, 2])

        with tf.variable_scope('reshape', reuse=reuse):
            afterGBD = int(network.get_shape()[-1])
            network = tf.reshape(network, [-1, self.input_frames, afterGBD])

        with tf.variable_scope('mainRNN', reuse=reuse):
            network = self.build_RNN(network, reuse)

        with tf.variable_scope('fc_part', reuse=reuse):

            network = tflearn.batch_normalization(network, name='batch_fc')
            network = tflearn.fully_connected(network,
                                              1024,
                                              name='fc1',
                                              activation=tf.nn.tanh)

        with tf.variable_scope("mainRegression", reuse=reuse):
            norm_label = expected / 20.0
            prediction, total_loss = tflearn_old.models.linear_regression(
                network, norm_label)
            output = prediction * 20.

        return dict(prediction=output,
                    loss=total_loss,
                    images=None,
                    attention=None)
Example #30
def inference(inputs, n_class=1000, finetuning=False):

    block_list = [3, 4, 6, 3]  #50-layer
    # block_list = [3, 4, 23, 3] #101-layer
    n_feature = [256, 512, 1024, 2048]

    end_points = {}
    # Building Residual Network
    with tf.variable_scope('input', reuse=tf.AUTO_REUSE):
        net = tflearn.conv_2d(inputs,
                              nb_filter=64,
                              filter_size=7,
                              strides=2,
                              bias=False,
                              regularizer='L2',
                              weight_decay=0.0001)  #112,64
        net = tflearn.batch_normalization(net)
        net = tf.nn.relu(net)
        print(net)  # 112*112
        net = tflearn.max_pool_2d(net, kernel_size=3, strides=2)  #56,64
        print(net)  # 56*56
    end_points['conv-1'] = net
    for i in range(4):
        downsample = False if i == 0 else True
        net = resnext_block(net,
                            nb_blocks=block_list[i],
                            out_channels=n_feature[i],
                            cardinality=32,
                            downsample=downsample,
                            scope="block_" + str(i))
        print(net)
        end_points['block-' + str(i)] = net

    net = tflearn.global_avg_pool(net)
    print(net)
    with tf.variable_scope('FC', reuse=tf.AUTO_REUSE):
        net = tflearn.fully_connected(net,
                                      n_class,
                                      weights_init='uniform_scaling',
                                      regularizer='L2',
                                      weight_decay=0.0001,
                                      restore=(not finetuning))
    print(net)

    return net
Example #31
	def build_network(self, num_classes, input_shape, model):
		network = tflearn.input_data(shape=[None, input_shape[0], input_shape[1], input_shape[2]])
		if model == 'DeepFace':
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 32, 11, strides=1, padding='VALID', name='Conv2d_1'))
			maxpool_1 = tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1')
			conv_2 = tflearn.relu(tflearn.conv_2d(maxpool_1, 32, 9, strides=1, padding='VALID', name='Conv2d_2'))

			local_1 = tflearn.relu(self.local(conv_2, 16, 9, 1, 'Local_1'))
			local_2 = tflearn.relu(self.local(local_1, 16, 7, 1, 'Local_2'))
			local_3 = tflearn.relu(self.local(local_2, 16, 5, 1, 'Local_3'))

			flatterned = tflearn.flatten(local_3)
			full_1 = tflearn.dropout(tflearn.relu(tflearn.fully_connected(flatterned, 4096, name='Fully_Connected_1')), 0.5)
			output = tflearn.fully_connected(full_1, num_classes, activation='softmax', name='Output')

		elif model == 'Song':
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 64, 5, strides=1, padding='VALID', name='Conv_1'))
			maxpool_1 = tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1')
			conv_2 = tflearn.relu(tflearn.conv_2d(maxpool_1, 64 , 5, strides=1, padding='VALID', name='Conv_2'))
			maxpool_2 = tflearn.max_pool_2d(conv_2, 3, strides=2, padding='VALID', name='MaxPool_2')

			local_1 = tflearn.dropout(tflearn.relu(self.local(maxpool_2, 32, 3, 1, 'Local_1')), 1)
			local_2 = tflearn.dropout(tflearn.relu(self.local(local_1, 32, 3, 1, 'Local_2')), 1)
			flatterned = tflearn.flatten(local_2)
			output = tflearn.fully_connected(flatterned, num_classes, activation='softmax', name='Output')

		else:
			conv_1 = tflearn.relu(tflearn.conv_2d(network, 64, 7, strides=2, bias=True, padding='VALID', name='Conv2d_1'))
			maxpool_1 = tflearn.batch_normalization(tflearn.max_pool_2d(conv_1, 3, strides=2, padding='VALID', name='MaxPool_1'))

			conv_2a = tflearn.relu(tflearn.conv_2d(maxpool_1, 96, 1, strides=1, padding='VALID', name='Conv_2a_FX1'))
			maxpool_2a = tflearn.max_pool_2d(maxpool_1, 3, strides=1, padding='VALID', name='MaxPool_2a_FX1')
			conv_2b = tflearn.relu(tflearn.conv_2d(conv_2a, 208, 3, strides=1, padding='VALID', name='Conv_2b_FX1'))
			conv_2c = tflearn.relu(tflearn.conv_2d(maxpool_2a, 64, 1, strides=1, padding='VALID', name='Conv_2c_FX1'))
			FX1_out = tflearn.merge([conv_2b, conv_2c], mode='concat', axis=3, name='FX1_out')

			conv_3a = tflearn.relu(tflearn.conv_2d(FX1_out, 96, 1, strides=1, padding='VALID', name='Conv_3a_FX2'))
			maxpool_3a = tflearn.max_pool_2d(FX1_out, 3, strides=1, padding='VALID', name='MaxPool_3a_FX2')
			conv_3b = tflearn.relu(tflearn.conv_2d(conv_3a, 208, 3, strides=1, padding='VALID', name='Conv_3b_FX2'))
			conv_3c = tflearn.relu(tflearn.conv_2d(maxpool_3a, 64, 1, strides=1, padding='VALID', name='Conv_3c_FX2'))
			FX2_out = tflearn.merge([conv_3b, conv_3c], mode='concat', axis=3, name='FX2_out')
			net = tflearn.flatten(FX2_out)
			output = tflearn.fully_connected(net, num_classes, activation='softmax', name='Output')

		return tflearn.regression(output, optimizer='Adam', loss='categorical_crossentropy', learning_rate=0.000001)
Example #32
def add_layer(net, lay):
    if lay[0] == 'down_conv_2d':
        net = tfl.conv_2d(net,
                          nb_filter=lay[1],
                          strides=[1, 2, 2, 1],
                          filter_size=lay[2],
                          activation=lay[3])
    elif lay[0] == 'conv_2d':
        net = tfl.conv_2d(net,
                          nb_filter=lay[1],
                          filter_size=lay[2],
                          activation=lay[3])
    elif lay[0] == 'flatten':
        s = net.get_shape()
        net = tf.reshape(net, [tf.shape(net)[0], s[1] * s[2] * s[3]])
    elif lay[0] == 'fully_connected':
        net = tfl.fully_connected(net, n_units=lay[1], activation=lay[2])
    elif lay[0] == 'expand':
        net = tf.reshape(net, [
            tf.shape(net)[0], lay[1], lay[2],
            net.get_shape()[1] / lay[1] / lay[2]
        ])
    elif lay[0] == 'up_conv_2d':
        net = tfl.conv_2d_transpose(net,
                                    nb_filter=lay[1],
                                    filter_size=lay[2],
                                    strides=[1, 2, 2, 1],
                                    output_shape=[lay[3], lay[4]],
                                    activation=lay[5],
                                    padding="same")
    elif lay[0] == 'input_layer':
        image_prep = tfl.ImagePreprocessing()
        image_prep.add_featurewise_stdnorm(per_channel=True,
                                           std=0.24051991589344662)
        image_prep.add_featurewise_zero_center(per_channel=True,
                                               mean=0.14699117337640238)

        net = tfl.layers.input_data(shape=[None, lay[1], lay[2]],
                                    data_preprocessing=image_prep)
        net = tf.expand_dims(net, axis=-1)

    if lay[-1] == 'batch_norm':
        net = tfl.batch_normalization(net)

    return net
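add_layer is driven by small layer-spec lists; a sketch of the kind of specification it expects (the concrete values here are illustrative only):

layer_specs = [
    ['input_layer', 128, 128],                      # HxW input with preprocessing
    ['conv_2d', 16, 3, 'relu', 'batch_norm'],       # conv + batch norm
    ['down_conv_2d', 32, 3, 'relu', 'batch_norm'],  # strided conv + batch norm
    ['flatten', None],
    ['fully_connected', 10, 'softmax', None],
]

net = None
for spec in layer_specs:
    net = add_layer(net, spec)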
Example #33
    def aconv1d_bn(self,
                   input_tensor,
                   size=7,
                   dim=1,
                   rate=2,
                   activation='tanh'):
        with tf.variable_scope('aconv1d_bn' + str(self.atrous2d_idx)):
            shape = input_tensor.get_shape().as_list()
            net = tflearn.layers.conv.atrous_conv_2d(tf.expand_dims(
                input_tensor, dim=1),
                                                     shape[-1], [1, size],
                                                     bias=False,
                                                     activation=activation)
            net = tf.squeeze(net, [1])
            net = tflearn.batch_normalization(net)

            self.atrous2d_idx += 1
            return net
Example #34
def create_model():
    # Building Wide Residual Network

    assert ((depth - 4) % 6 == 0)
    n = (depth - 4) // 6  # integer division so n can be used as a block count

    n_stages = [16, 16 * k, 32 * k, 64 * k]

    net = tflearn.input_data(shape=[None, 32, 32, 3],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    conv1 = conv_2d(net,
                    n_stages[0],
                    3,
                    activation='linear',
                    bias=False,
                    regularizer='L2',
                    weight_decay=0.0001)

    # Add wide residual blocks
    block_fn = _wide_basic
    conv2 = _layer(block_fn,
                   n_input_plane=n_stages[0],
                   n_output_plane=n_stages[1],
                   count=n,
                   stride=(1, 1))(conv1)  # "Stage 1 (spatial size: 32x32)"
    conv3 = _layer(block_fn,
                   n_input_plane=n_stages[1],
                   n_output_plane=n_stages[2],
                   count=n,
                   stride=(2, 2))(conv2)  # "Stage 2 (spatial size: 16x16)"
    conv4 = _layer(block_fn,
                   n_input_plane=n_stages[2],
                   n_output_plane=n_stages[3],
                   count=n,
                   stride=(2, 2))(conv3)  # "Stage 3 (spatial size: 8x8)"

    net = tflearn.batch_normalization(conv4)
    net = tflearn.activation(net, 'twins_relu')
    net = tflearn.avg_pool_2d(net, 8)
    #net = tflearn.avg_pool_2d(net, kernel_size=8, strides=1, padding='same')
    net = tflearn.fully_connected(net, 10, activation='softmax')

    return net
Example #35
    def DavidSpencer(self, tensorWidth, tensorHeight, tensorDepth):
        g = tf.Graph()
        with g.as_default():
            conv_net = input_data(shape=[None, tensorWidth, tensorHeight, tensorDepth])
            conv_net = conv_2d(conv_net,nb_filter=32,filter_size=5, activation='relu', bias=True)
            conv_net = batch_normalization(conv_net)
            conv_net = max_pool_2d(conv_net, 4)
            conv_net = dropout(conv_net, 0.5)
            conv_net = fully_connected(conv_net, 100, activation='relu')
            conv_net = dropout(conv_net, 0.5)


            conv_net = fully_connected(conv_net, 2, activation='softmax')
            conv_net = regression(conv_net, optimizer='sgd', learning_rate=0.01,
                                  loss='categorical_crossentropy')

            model = tflearn.DNN(conv_net, tensorboard_verbose=0)

        return model, g
Example #36
 def build_residual_network(self, network, res_n=5):
     #     data_augmentation=self.generate_image_augumentation())
     network = tflearn.conv_2d(network, 16, 3, regularizer='L2',
                               weight_decay=0.0001)
     network = tflearn.residual_block(network, res_n, 16)
     network = tflearn.residual_block(network, 1, 32, downsample=True)
     network = tflearn.residual_block(network, res_n - 1, 32)
     network = tflearn.residual_block(network, 1, 64, downsample=True)
     network = tflearn.residual_block(network, res_n - 1, 64)
     network = tflearn.batch_normalization(network)
     network = tflearn.activation(network, 'relu')
     network = tflearn.global_avg_pool(network)
     # Regression
     network = tflearn.fully_connected(network, 2, activation='softmax')
     mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000,
                            staircase=True)
     network = tflearn.regression(network, optimizer=mom,
                                  loss='categorical_crossentropy')
     return network
Example #37
 def create_actor_network(self):
     inputs = tflearn.input_data(shape=[None, self.s_dim])
     bn_inputs = tflearn.batch_normalization(inputs, trainable=True)
     w_init_layer1 = tflearn.initializations.xavier(seed=RANDOM_SEED)
     layer1 = tflearn.fully_connected(bn_inputs,
                                      actor_hidden_layer1_size,
                                      activation='linear',
                                      weights_init=w_init_layer1)
     layer2 = ResDense(layer1, actor_hidden_layer1_size,
                       actor_hidden_layer2_size)
     w_init_out = tflearn.initializations.uniform(minval=-3e-9,
                                                  maxval=3e-9,
                                                  seed=RANDOM_SEED)
     out = tflearn.fully_connected(layer2,
                                   self.a_dim,
                                   weights_init=w_init_out)
     out = tflearn.activation(out, 'tanh')
     scaled_out = tf.multiply(out, self.action_bound)
     return inputs, out, scaled_out
Example #38
def resNet(nLabels, nFreq, nTime, featureMap=False):
	n=5
	tflearn.init_graph(gpu_memory_fraction=0.5, seed=6969)
	net = tflearn.input_data(shape=[None, nFreq, nTime, 1])
	net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
	res1 = tflearn.residual_block(net, n, 16)
	res2 = tflearn.residual_block(res1, 1, 32, downsample=True)
	res3 = tflearn.residual_block(res2, n-1, 32)
	res4 = tflearn.residual_block(res3, 1, 64, downsample=True)
	res5 = tflearn.residual_block(res4, n-1, 64)
	out = tflearn.batch_normalization(res5)
	out = tflearn.activation(out, 'relu')
	out = tflearn.global_avg_pool(out)
	out = tflearn.fully_connected(out, nLabels, activation='softmax')
	out = tflearn.regression(out, optimizer='adam',loss='categorical_crossentropy',learning_rate=0.001)
	if featureMap:
		return res1, res2, res3, res4, res5, out
	else:
		return out
Example #39
def transition_layer(x, scope, reduction=0.5, keep_prob=1):
    out_filters = int(int(x.get_shape()[-1]) * reduction)
    with tf.name_scope(scope):
        x = tflearn.batch_normalization(x, scope=scope + '_batch1')
        x = tf.nn.relu(x)
        x = tflearn.conv_2d(x,
                            nb_filter=out_filters,
                            filter_size=1,
                            strides=1,
                            padding='same',
                            activation='linear',
                            bias=False,
                            scope=scope + '_conv1',
                            regularizer='L2',
                            weight_decay=1e-4)
        x = tflearn.dropout(x, keep_prob=keep_prob)
        x = tflearn.avg_pool_2d(x, kernel_size=2, strides=2, padding='valid')
        print(scope, x)
        return x
Example #40
File: models.py Project: gcm0621/pygta5
def resnext(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)  # n (blocks per stage) is not defined here; it must come from module scope
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Example #41
def residual_bottleneck(incoming, nb_blocks, bottleneck_size, out_channels,
                        downsample=False, downsample_strides=2,
                        activation='relu', batch_norm=True, bias=True,
                        weights_init='variance_scaling', bias_init='zeros',
                        regularizer='L2', weight_decay=0.0001,
                        trainable=True, restore=True, name="ResidualBottleneck"):
    """ Residual Bottleneck.

    A residual bottleneck block as described in MSRA's Deep Residual Network
    paper. Full pre-activation architecture is used here.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        bottleneck_size: `int`. The number of convolutional filter of the
            bottleneck convolutional layer.
        out_channels: `int`. The number of convolutional filters of the
            layers surrounding the bottleneck layer.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'ResidualBottleneck'.

    References:
        - Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
            Zhang, Shaoqing Ren, Jian Sun. 2015.
        - Identity Mappings in Deep Residual Networks. Kaiming He, Xiangyu
            Zhang, Shaoqing Ren, Jian Sun. 2016.

    Links:
        - [http://arxiv.org/pdf/1512.03385v1.pdf]
            (http://arxiv.org/pdf/1512.03385v1.pdf)
        - [Identity Mappings in Deep Residual Networks]
            (https://arxiv.org/pdf/1603.05027v2.pdf)

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.name_scope(name):
        for i in range(nb_blocks):

            identity = resnet

            if not downsample:
                downsample_strides = 1

            if batch_norm:
                resnet = tflearn.batch_normalization(resnet)
            resnet = tflearn.activation(resnet, activation)

            resnet = conv_2d(resnet, bottleneck_size, 1,
                             downsample_strides, 'valid',
                             'linear', bias, weights_init,
                             bias_init, regularizer, weight_decay,
                             trainable, restore)

            if batch_norm:
                resnet = tflearn.batch_normalization(resnet)
            resnet = tflearn.activation(resnet, activation)

            resnet = conv_2d(resnet, bottleneck_size, 3, 1, 'same',
                             'linear', bias, weights_init,
                             bias_init, regularizer, weight_decay,
                             trainable, restore)

            resnet = conv_2d(resnet, out_channels, 1, 1, 'valid',
                             activation, bias, weights_init,
                             bias_init, regularizer, weight_decay,
                             trainable, restore)

            # Downsampling
            if downsample_strides > 1:
                identity = tflearn.avg_pool_2d(identity, 1,
                                               downsample_strides)

            # Projection to new dimension
            if in_channels != out_channels:
                ch = (out_channels - in_channels)//2
                identity = tf.pad(identity,
                                  [[0, 0], [0, 0], [0, 0], [ch, ch]])
                in_channels = out_channels

            resnet = resnet + identity
            resnet = tflearn.activation(resnet, activation)

    return resnet
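# Usage sketch (not from the original source): stacking the bottleneck block
# into a small image network; all layer sizes below are assumed.
import tflearn

demo_net = tflearn.input_data(shape=[None, 32, 32, 3])
demo_net = tflearn.conv_2d(demo_net, 64, 3, regularizer='L2', weight_decay=0.0001)
demo_net = residual_bottleneck(demo_net, 2, 16, 64)
demo_net = residual_bottleneck(demo_net, 1, 32, 128, downsample=True)
demo_net = tflearn.batch_normalization(demo_net)
demo_net = tflearn.activation(demo_net, 'relu')
demo_net = tflearn.global_avg_pool(demo_net)
demo_net = tflearn.fully_connected(demo_net, 10, activation='softmax')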
Example #42
0
File: conv.py Project: Snazz2001/tflearn
def shallow_residual_block(incoming, nb_blocks, out_channels,
                           downsample=False, downsample_strides=2,
                           activation='relu', batch_norm=True, bias=False,
                           weights_init='uniform_scaling', bias_init='zeros',
                           regularizer=None, weight_decay=0.0001,
                           trainable=True, restore=True,
                           name="ShallowResidualBlock"):
    """ Shallow Residual Block.

    A shallow residual block as described in MSRA's Deep Residual Network
    paper.

    Notice: Because TensorFlow doesn't support strides larger than the filter
    size, average pooling (or resizing) is used as a workaround, which
    decreases performance.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        out_channels: `int`. The number of convolutional filters of the
            convolution layers.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'ShallowResidualBlock'.

    References:
        Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
        Zhang, Shaoqing Ren, Jian Sun. 2015.

    Links:
        [http://arxiv.org/pdf/1512.03385v1.pdf]
        (http://arxiv.org/pdf/1512.03385v1.pdf)

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.name_scope(name):
        for i in range(nb_blocks):
            with tf.name_scope('ResidualBlock'):

                identity = resnet

                if downsample:
                    resnet = conv_2d(resnet, out_channels, 3,
                                     downsample_strides, 'same', 'linear',
                                     bias, weights_init, bias_init,
                                     regularizer, weight_decay, trainable,
                                     restore)
                else:
                    resnet = conv_2d(resnet, out_channels, 3, 1, 'same',
                                     'linear', bias, weights_init,
                                     bias_init, regularizer, weight_decay,
                                     trainable, restore)
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)
                resnet = tflearn.activation(resnet, activation)

                resnet = conv_2d(resnet, out_channels, 3, 1, 'same',
                                 'linear', bias, weights_init,
                                 bias_init, regularizer, weight_decay,
                                 trainable, restore)
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                # TensorFlow can't accept a kernel size smaller than the
                # strides, so average pooling or resizing is used here for
                # downsampling.

                # Downsampling
                if downsample:
                    #identity = avg_pool_2d(identity, downsample_strides,
                    #                       downsample_strides)
                    size = resnet.get_shape().as_list()
                    identity = tf.image.resize_nearest_neighbor(identity,
                                                                [size[1],
                                                                 size[2]])

                # Projection to new dimension
                if in_channels != out_channels:
                    in_channels = out_channels
                    identity = conv_2d(identity, out_channels, 1, 1, 'same',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)

                resnet = resnet + identity
                resnet = tflearn.activation(resnet, activation)

    return resnet
Example #43
0
File: conv.py Project: Snazz2001/tflearn
def deep_bottleneck(incoming, nb_layers, nb_filter, bottleneck_size,
                    activation='relu', batch_norm=True, bias=False,
                    weights_init='uniform_scaling', bias_init='zeros',
                    regularizer=None, weight_decay=0.001, trainable=True,
                    restore=True, name="DeepBottleneck"):
    """ Deep Bottleneck.

    As described in MSRA's Deep Residual Network paper.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_layers: `int`. Number of layer blocks.
        nb_filter: `int`. The number of convolutional filters of the
            layers surrounding the bottleneck layer.
        bottleneck_size: `int`. The number of convolutional filters of the
            bottleneck convolutional layer.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'DeepBottleneck'.

    """
    resnet = incoming
    # Build the bottleneck block layers. This layer doesn't need a
    # `build inference` step like other layers, because it only composes
    # pre-existing layers and doesn't define any new ops.
    with tf.name_scope(name):
        for i in range(nb_layers):
            with tf.name_scope('ResidualLayer'):
                with tf.name_scope("in"):
                    residual = conv_2d(resnet, bottleneck_size, 1, 1, 'valid',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)
                with tf.name_scope("bottleneck"):
                    residual = conv_2d(residual, bottleneck_size, 3, 1, 'same',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)
                with tf.name_scope("out"):
                    residual = conv_2d(residual, nb_filter, 1, 1, 'valid',
                                       'linear', bias, weights_init,
                                       bias_init, regularizer, weight_decay,
                                       trainable, restore)
                    if batch_norm:
                        residual = tflearn.batch_normalization(residual)
                    residual = tflearn.activation(residual, activation)

                resnet = resnet + residual

    return resnet
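# Usage sketch (not from the original source): a deep bottleneck stack on top
# of a small convolutional stem; channel counts are assumed for illustration.
import tflearn

stem = tflearn.input_data(shape=[None, 32, 32, 3])
stem = tflearn.conv_2d(stem, 64, 3, activation='relu')
stem = deep_bottleneck(stem, 3, 64, 16)
stem = tflearn.global_avg_pool(stem)
stem = tflearn.fully_connected(stem, 10, activation='softmax')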
def conv_2d_BN(incoming, nb_filter, filter_size, strides=1, padding='same',
            activation='linear', bias=True, weights_init='xavier',
            bias_init='zeros', regularizer=None, weight_decay=0.001,
            trainable=True, restore=True, reuse=False, scope=None,
            name="Conv2D", batch_norm=False):
    """ Convolution 2D.
    Input:
        4-D Tensor [batch, height, width, in_channels].
    Output:
        4-D Tensor [batch, new height, new width, nb_filter].
    Arguments:
        incoming: `Tensor`. Incoming 4-D Tensor.
        nb_filter: `int`. The number of convolutional filters.
        filter_size: `int` or `list of int`. Size of filters.
        strides: `int` or list of `int`. Strides of conv operation.
            Default: [1 1 1 1].
        padding: `str` from `"same", "valid"`. Padding algo to use.
            Default: 'same'.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'xavier'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Conv2D'.
        batch_norm: `bool`. If True, add batch normalization with default
            TFLearn parameters before the activation layer.
    Attributes:
        scope: `Scope`. This layer scope.
        W: `Variable`. Variable representing filter weights.
        b: `Variable`. Variable representing biases.
    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
    filter_size = utils.autoformat_filter_conv2d(filter_size,
                                                 input_shape[-1],
                                                 nb_filter)
    strides = utils.autoformat_kernel_2d(strides)
    padding = utils.autoformat_padding(padding)

    # Variable Scope fix for older TF
    try:
        vscope = tf.variable_scope(scope, default_name=name, values=[incoming],
                                   reuse=reuse)
    except Exception:
        vscope = tf.variable_op_scope([incoming], scope, name, reuse=reuse)

    with vscope as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W', shape=filter_size, regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)

        # Track per layer variables
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b', shape=nb_filter, initializer=bias_init,
                            trainable=trainable, restore=restore)
            # Track per layer variables
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = tf.nn.conv2d(incoming, W, strides, padding)
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if batch_norm:
            inference = batch_normalization(inference)
        
        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
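# Usage sketch (not from the original source): conv_2d_BN is a drop-in
# replacement for tflearn.conv_2d with optional batch norm applied before the
# activation; the layer sizes below are assumed.
import tflearn

demo_in = tflearn.input_data(shape=[None, 28, 28, 1])
demo_conv1 = conv_2d_BN(demo_in, 32, 3, activation='relu', batch_norm=True)
demo_conv2 = conv_2d_BN(demo_conv1, 64, 3, strides=2, activation='relu', batch_norm=True)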
Example #45
0
import tflearn
import tflearn.data_utils as du

# Data loading
from tflearn.datasets import cifar10
(X, Y), (testX, testY) = cifar10.load_data()
# Data pre-processing
X, mean = du.featurewise_zero_center(X)
X, std = du.featurewise_std_normalization(X)
testX = du.featurewise_zero_center(testX, mean)
testX = du.featurewise_std_normalization(testX, std)
Y = du.to_categorical(Y, 10)
testY = du.to_categorical(testY, 10)

# Building Residual Network
net = tflearn.input_data(shape=[None, 32, 32, 3])
net = tflearn.conv_2d(net, 32, 3)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.shallow_residual_block(net, 4, 32, regularizer='L2')
net = tflearn.shallow_residual_block(net, 1, 32, downsample=True,
                                     regularizer='L2')
net = tflearn.shallow_residual_block(net, 4, 64, regularizer='L2')
net = tflearn.shallow_residual_block(net, 1, 64, downsample=True,
                                     regularizer='L2')
net = tflearn.shallow_residual_block(net, 5, 128, regularizer='L2')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')
mom = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=16000, staircase=True)
net = tflearn.regression(net, optimizer=mom,
                         loss='categorical_crossentropy')
# Training
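# Minimal training sketch (not part of the original snippet): the checkpoint
# path, epoch count and batch size below are assumed, not taken from the source.
model = tflearn.DNN(net, checkpoint_path='model_resnet_cifar10',
                    max_checkpoints=10, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=200, validation_set=(testX, testY),
          show_metric=True, batch_size=128, run_id='resnet_cifar10')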

import argparse
import tflearn as tl
# generate_patches and img2numpy_arr are project-specific helpers that are
# not shown in this snippet.

if __name__ == "__main__":
	parser = argparse.ArgumentParser()
	parser.add_argument('--t', action='store', dest='test_path', type=str, help='Test Data Path')
	config = parser.parse_args()

	#Load Test data
	image_count = (3,6)
	patch_count = 20
	X = generate_patches(img2numpy_arr(config.test_path), image_count, patch_count)

	# Building Residual Network
	net = tl.input_data(shape=[None, 42, 42, 3])
	net = tl.conv_2d(net, 32, 3)
	net = tl.batch_normalization(net)
	net = tl.activation(net, 'relu')
	net = tl.shallow_residual_block(net, 4, 32, regularizer='L2')
	net = tl.shallow_residual_block(net, 1, 32, downsample=True,
												 regularizer='L2')
	net = tl.shallow_residual_block(net, 4, 64, regularizer='L2')
	net = tl.shallow_residual_block(net, 1, 64, downsample=True,
												 regularizer='L2')
	net = tl.shallow_residual_block(net, 5, 64, regularizer='L2')
	net = tl.global_avg_pool(net)
	
	# Regression
	net = tl.fully_connected(net, 9, activation='softmax')
	mom = tl.Momentum(0.1, lr_decay=0.1, decay_step=16000, staircase=True)
	net = tl.regression(net, optimizer=mom,
									 loss='categorical_crossentropy')
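	# Inference sketch (not part of the original snippet): the weights file name
	# below is a placeholder; use it only once a trained checkpoint exists.
	model = tl.DNN(net)
	model.load('residual_network.tfl')
	pred = model.predict(X)
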
def fully_connected_BN(incoming, n_units, activation='linear', bias=True,
                    weights_init='truncated_normal', bias_init='zeros',
                    regularizer=None, weight_decay=0.001, trainable=True,
                    restore=True, reuse=False, scope=None,
                    name="FullyConnected", batch_norm=False):
    """ Fully Connected.
    A fully connected layer.
    Input:
        (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.
    Output:
        2D Tensor [samples, n_units].
    Arguments:
        incoming: `Tensor`. Incoming (2+)D Tensor.
        n_units: `int`, number of units for this layer.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        bias_init: `str` (name) or `Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'FullyConnected'.
        batch_norm: `bool`. If True, add a batch normalization layer before
            the activation function.
    Attributes:
        scope: `Scope`. This layer scope.
        W: `Tensor`. Variable representing units weights.
        b: `Tensor`. Variable representing biases.
    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    n_inputs = int(np.prod(input_shape[1:]))

    # Build variables and inference.
    # Variable Scope fix for older TF
    try:
        vscope = tf.variable_scope(scope, default_name=name, values=[incoming],
                                   reuse=reuse)
    except Exception:
        vscope = tf.variable_op_scope([incoming], scope, name, reuse=reuse)

    with vscope as scope:
        name = scope.name

        W_init = weights_init
        if isinstance(weights_init, str):
            W_init = initializations.get(weights_init)()
        W_regul = None
        if regularizer:
            W_regul = lambda x: losses.get(regularizer)(x, weight_decay)
        W = vs.variable('W', shape=[n_inputs, n_units], regularizer=W_regul,
                        initializer=W_init, trainable=trainable,
                        restore=restore)
        tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        b = None
        if bias:
            if isinstance(bias_init, str):
                bias_init = initializations.get(bias_init)()
            b = vs.variable('b', shape=[n_units], initializer=bias_init,
                            trainable=trainable, restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, b)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(inference, [-1, n_inputs])

        inference = tf.matmul(inference, W)
        if batch_norm:
            inference = batch_normalization(inference)
            
        if b is not None: inference = tf.nn.bias_add(inference, b)

        if isinstance(activation, str):
            inference = activations.get(activation)(inference)
        elif hasattr(activation, '__call__'):
            inference = activation(inference)
        else:
            raise ValueError("Invalid Activation.")

        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)

    # Add attributes to Tensor to easy access weights.
    inference.scope = scope
    inference.W = W
    inference.b = b

    # Track output tensor.
    #tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
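# Usage sketch (not from the original source): a small BN-regularized MLP head;
# the layer sizes are assumed for illustration.
import tflearn

demo_feat = tflearn.input_data(shape=[None, 784])
demo_fc1 = fully_connected_BN(demo_feat, 256, activation='relu', batch_norm=True)
demo_out = fully_connected_BN(demo_fc1, 10, activation='softmax')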
Example #48
0
def deep_residual_block(
    incoming,
    nb_blocks,
    bottleneck_size,
    out_channels,
    downsample=False,
    downsample_strides=2,
    activation="relu",
    batch_norm=True,
    bias=False,
    weights_init="uniform_scaling",
    bias_init="zeros",
    regularizer=None,
    weight_decay=0.001,
    trainable=True,
    restore=True,
    name="DeepResidualBlock",
):
    """ Deep Residual Block.

    A deep residual block as described in MSRA's Deep Residual Network paper.

    Notice: Because TensorFlow doesn't support strides larger than the filter
    size, average pooling is used as a workaround, which decreases performance.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        4-D Tensor [batch, new height, new width, nb_filter].

    Arguments:
        incoming: `Tensor`. Incoming 4-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        bottleneck_size: `int`. The number of convolutional filters of the
            bottleneck convolutional layer.
        out_channels: `int`. The number of convolutional filters of the
            layers surrounding the bottleneck layer.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `Tensor`. Activation applied to this layer.
            (see tflearn.activations). Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'uniform_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: None.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        name: A name for this layer (optional). Default: 'DeepResidualBlock'.

    References:
        Deep Residual Learning for Image Recognition. Kaiming He, Xiangyu
        Zhang, Shaoqing Ren, Jian Sun. 2015.

    Links:
        [http://arxiv.org/pdf/1512.03385v1.pdf]
        (http://arxiv.org/pdf/1512.03385v1.pdf)

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    with tf.name_scope(name):
        for i in range(nb_blocks):
            with tf.name_scope("ResidualBlock"):

                identity = resnet

                if downsample:
                    # Use average pooling, because TensorFlow conv_2d can't
                    # accept kernel size < strides.
                    resnet = avg_pool_2d(resnet, downsample_strides, downsample_strides)
                    resnet = conv_2d(
                        resnet,
                        bottleneck_size,
                        1,
                        1,
                        "valid",
                        activation,
                        bias,
                        weights_init,
                        bias_init,
                        regularizer,
                        weight_decay,
                        trainable,
                        restore,
                    )
                else:
                    resnet = conv_2d(
                        resnet,
                        bottleneck_size,
                        1,
                        1,
                        "valid",
                        activation,
                        bias,
                        weights_init,
                        bias_init,
                        regularizer,
                        weight_decay,
                        trainable,
                        restore,
                    )
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                resnet = conv_2d(
                    resnet,
                    bottleneck_size,
                    3,
                    1,
                    "same",
                    activation,
                    bias,
                    weights_init,
                    bias_init,
                    regularizer,
                    weight_decay,
                    trainable,
                    restore,
                )
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                resnet = conv_2d(
                    resnet,
                    out_channels,
                    1,
                    1,
                    "valid",
                    activation,
                    bias,
                    weights_init,
                    bias_init,
                    regularizer,
                    weight_decay,
                    trainable,
                    restore,
                )
                if batch_norm:
                    resnet = tflearn.batch_normalization(resnet)

                if downsample:
                    # Use average pooling, because TensorFlow conv_2d can't
                    # accept kernel size < strides.
                    identity = avg_pool_2d(identity, downsample_strides, downsample_strides)

                # Projection to new dimension
                if in_channels != out_channels:
                    in_channels = out_channels
                    identity = conv_2d(
                        identity,
                        out_channels,
                        1,
                        1,
                        "valid",
                        "linear",
                        bias,
                        weights_init,
                        bias_init,
                        regularizer,
                        weight_decay,
                        trainable,
                        restore,
                    )

                resnet = resnet + identity
                resnet = tflearn.activation(resnet, activation)

    return resnet
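# Usage sketch (not from the original source): a small deep-residual stack;
# block counts and channel sizes are assumed.
import tflearn

demo_img = tflearn.input_data(shape=[None, 32, 32, 3])
demo_img = tflearn.conv_2d(demo_img, 64, 3, activation='relu')
demo_img = deep_residual_block(demo_img, 3, 16, 64)
demo_img = deep_residual_block(demo_img, 1, 32, 128, downsample=True)
demo_img = tflearn.global_avg_pool(demo_img)
demo_img = tflearn.fully_connected(demo_img, 10, activation='softmax')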