def test_convolution_layer(input_data):
    """Check our Convolution forward pass against the book's reference layer.

    Builds both layers from the same random weights/bias, runs the same
    input through each, and prints whether the outputs match exactly.
    """
    weights = np.random.randn(1, 3, 2, 2)
    bias = np.random.randn(1)

    mine = Convolution(weights, bias).forward(input_data)
    reference = book_layers.Convolution(weights, bias).forward(input_data)

    match = (mine == reference).all()
    print(f"test result of test_convolution_layer: {match}")
def test_convolution_backward(input_data):
    """Check our Convolution backward pass against the book's reference layer.

    Both layers share the same weights and see the same upstream gradient;
    the test passes when the returned input-gradients agree to 1e-10.
    """
    W = np.random.randn(1, 3, 2, 2)
    b = np.random.randn(1)

    myconv = Convolution(W, b)
    myresult = myconv.forward(input_data)

    bookconv = book_layers.Convolution(W, b)
    # Forward must run first so the layer caches what backward needs.
    bookconv.forward(input_data)

    # Upstream gradient with the forward-output shape (it is dout, not dx).
    dout = np.random.randn(*myresult.shape)
    my_b_r = myconv.backward(dout)
    book_b_r = bookconv.backward(dout.copy())
    # BUG FIX: compare the ABSOLUTE difference — max() alone would pass even
    # if some entries differed by a large negative amount.
    difference = np.abs(my_b_r - book_b_r)
    # BUG FIX: report under the correct test name (was "test_pooling_layer").
    print(f"test result of test_convolution_backward: {difference.max() < 1e-10}")
示例#3
0
 def get_shapes(self):
     '''Build the drawable shapes for every layer plus the funnels between them.'''
     shapes = []
     for i, layer in enumerate(self.layers):
         # Keyword arguments shared by all four shape classes.
         common = dict(name=layer.name,
                       position=layer.position,
                       output_dim=layer.output_dim,
                       depth=layer.depth,
                       output_dim_label=layer.output_dim_label)
         if 'input' in layer.name:
             shapes = np.append(
                 shapes,
                 Input(flatten=layer.flatten, **common).draw())
         if 'dense' in layer.name:
             shapes = np.append(
                 shapes,
                 Dense(activation=layer.activation,
                       maxpool=layer.maxpool,
                       flatten=layer.flatten,
                       **common).draw())
         if 'conv' in layer.name:
             shapes = np.append(
                 shapes,
                 Convolution(activation=layer.activation,
                             maxpool=layer.maxpool,
                             flatten=layer.flatten,
                             **common).draw())
         if 'output' in layer.name:
             shapes = np.append(shapes, Output(**common).draw())
         # Every layer after the first is connected to its predecessor.
         if i:
             prev = self.layers[i - 1]
             grey = (178.0 / 255, 178.0 / 255, 178.0 / 255)
             shapes = np.append(
                 shapes,
                 Funnel(prev_position=prev.position,
                        prev_depth=prev.depth,
                        prev_output_dim=prev.output_dim,
                        curr_position=layer.position,
                        curr_depth=layer.depth,
                        curr_output_dim=layer.output_dim,
                        color=grey).draw())
     self.shapes = shapes
示例#4
0
def convModule(flow, filters, dropout, strides=(1, 1), s=(3, 3)):  # {{{
    """Conv block: optional CReLU, convolution, optional observation/ReLU/dropout.

    Relies on module-level configuration flags: ``mirroring``, ``observing``,
    ``resNet``, ``doDropout``, ``init``, ``initKWArgs``, ``regF``, ``regP``.
    """
    if mirroring:
        flow = CReLU()(flow)
    conv = Convolution(filters, s=s, initialisation=init, initKWArgs=initKWArgs,
                       strides=strides, regFunction=regF, reg=regP)
    flow = conv(flow)
    if observing and not resNet:
        flow = Observation()(flow)
    # With mirroring, CReLU above already provided the non-linearity.
    if not mirroring:
        flow = Activation('relu')(flow)
    return Dropout(dropout)(flow) if doDropout else flow  # }}}
示例#5
0
    def __init__(self,
                 input_dim=(1, 28, 28),  # (C, W, H)
                 filter_num=30,
                 filter_size=5,
                 filter_pad=0,
                 filter_stride=1,
                 hidden_size=100,
                 output_size=10,
                 weight_init_std=0.01
                 ):
        """Simple CNN.

        Architecture:
            input(N, C, W, H)
            -> Conv(N, FN, conv_out_h, conv_out_w) -> ReLU
            -> Pooling(N, FN, pool_out_h, pool_out_w)
            -> Affine (flattens) (N, hidden_size) -> ReLU
            -> Affine(N, output_size) -> SoftmaxWithLoss
        """
        # Input size is taken dynamically from input_dim (square input assumed).
        side = input_dim[1]
        conv_out = (side + 2 * filter_pad - filter_size) / filter_stride + 1
        # Flattened size after 2x2 pooling: FN * (out/2) * (out/2).
        pool_out = int(filter_num * (conv_out / 2) * (conv_out / 2))

        # Weight initialisation (Gaussian scaled by weight_init_std).
        self.params = {
            'W1': weight_init_std * np.random.randn(
                filter_num, input_dim[0], filter_size, filter_size),
            'b1': np.zeros(filter_num),
            'W2': weight_init_std * np.random.randn(pool_out, hidden_size),
            'b2': np.zeros(hidden_size),
            'W3': weight_init_std * np.random.randn(hidden_size, output_size),
            'b3': np.zeros(output_size),
        }

        # Layers in forward order.
        self.layers = OrderedDict([
            ('Conv1', Convolution(self.params['W1'], self.params['b1'],
                                  filter_stride, filter_pad)),
            ('ReLu1', ReLu()),
            ('Pool1', Pooling(pool_h=2, pool_w=2, stride=2)),
            ('Affine1', Affine(self.params['W2'], self.params['b2'])),
            ('ReLu2', ReLu()),
            ('Affine2', Affine(self.params['W3'], self.params['b3'])),
        ])

        self.last_layer = SoftmaxWithLoss()
    def __init__(self,
                 input_dim=(1, 28, 28),
                 conv_param=None,
                 hidden_size=100,
                 output_size=10,
                 weight_init_std=0.01):
        """Simple CNN: Conv -> ReLU -> Pool(2x2) -> Affine -> ReLU -> Affine -> Softmax.

        conv_param keys: 'filter_num', 'filter_size', 'pad', 'stride'.
        """
        # BUG FIX: a mutable dict default is shared across all calls; build
        # the default per call instead.
        if conv_param is None:
            conv_param = {
                'filter_num': 30,
                'filter_size': 5,
                'pad': 0,
                'stride': 1
            }
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        # Conv output side: (in - filter + 2*pad) / stride + 1.
        conv_output_size = (input_size - filter_size +
                            2 * filter_pad) / filter_stride + 1
        # Flattened size after 2x2 pooling.
        pool_output_size = int(filter_num * (conv_output_size / 2) *
                               (conv_output_size / 2))

        # Weight initialisation
        self.params = {}
        self.params['W1'] = weight_init_std * \
                            np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * \
                            np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * \
                            np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # Layer construction, in forward order.
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'],
                                           self.params['b1'],
                                           conv_param['stride'],
                                           conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])

        self.last_layer = SoftmaxWithLoss()
        def inception_block(net, is_training, end_point, layers_config, layers, idx):
            """Append one 3D Inception block to the graph.

            Builds four parallel branches (1x1x1 conv; 1x1x1 -> 3x3x3 conv;
            1x1x1 -> 3x3x3 conv; max-pool -> 1x1x1 conv), records each layer
            in `layers` keyed by a running index, and concatenates the branch
            outputs along the channel axis (axis 4 of NDHWC tensors).

            Returns (net, idx): the concatenated output and the next free
            layer index.
            """
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    # 1x1x1 Conv, stride 1
                    layers[idx] = Convolution(net, 'Conv3d_0a_1x1', layers_config['Branch_0'][0],
                        kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_0 = layers[idx].forward(); idx += 1

                with tf.variable_scope('Branch_1'):
                    # 1x1x1 Conv, stride 1
                    layers[idx] = Convolution(net, 'Conv3d_0a_1x1', layers_config['Branch_1'][0],
                        kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_1 = layers[idx].forward(); idx += 1
                    # 3x3x3 Conv, stride 1
                    layers[idx] = Convolution(branch_1, 'Conv3d_0b_3x3', layers_config['Branch_1'][1],
                        kernel_size=3, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_1 = layers[idx].forward(); idx += 1

                with tf.variable_scope('Branch_2'):
                    # 1x1x1 Conv, stride 1
                    layers[idx] = Convolution(net, 'Conv3d_0a_1x1', layers_config['Branch_2'][0],
                        kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_2 = layers[idx].forward(); idx += 1
                    # 3x3x3 Conv, stride 1
                    layers[idx] = Convolution(branch_2, 'Conv3d_0b_3x3', layers_config['Branch_2'][1],
                        kernel_size=3, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_2 = layers[idx].forward(); idx += 1

                with tf.variable_scope('Branch_3'):
                    # 3x3x3 Max-pool, stride 1, 1, 1
                    layers[idx] = Pooling(net, 'MaxPool3d_0a_3x3',
                        kernel_size=layers_config['Branch_3'][0], strides=layers_config['Branch_3'][1],
                        padding='SAME', pool='MAX')
                    branch_3 = layers[idx].forward(); idx += 1
                    # 1x1x1 Conv, stride 1
                    layers[idx] = Convolution(branch_3, 'Conv3d_0b_1x1', layers_config['Branch_3'][2],
                        kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_3 = layers[idx].forward(); idx += 1

                # Concat branch_[0-3] along the channel dimension.
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)

            return net, idx
示例#8
0
	def __init__(self, input_dim=(1,28,28),
				 conv_param=None,
				 hidden_size=100, output_size=10, weight_init_std=0.01):
		"""Simple CNN: Conv -> ReLU -> Pool(2x2) -> Affine -> ReLU -> Affine -> Softmax.

		conv_param keys: 'filter_num', 'filter_size', 'pad', 'stride'.
		"""
		# BUG FIX: a mutable dict default is shared across calls; build it here.
		if conv_param is None:
			conv_param = {'filter_num': 30, 'filter_size': 5,
						  'pad': 0, 'stride': 1}
		filter_num = conv_param['filter_num']
		filter_size = conv_param['filter_size']
		# BUG FIX: the dict keys are 'pad'/'stride' (see the default above);
		# the old lookups of 'filter_pad'/'filter_stride' raised KeyError.
		filter_pad = conv_param['pad']
		filter_stride = conv_param['stride']
		input_size = input_dim[1]
		# Derive the conv/pool output sizes from the input size.
		# Output = (Input + 2*Pad - Filter) / Stride + 1
		# (the old comment wrongly divided by 2*Stride)
		conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
		pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))

		self.params = {}
		self.params['W1'] = weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
		self.params['b1'] = np.zeros(filter_num)
		self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
		self.params['b2'] = np.zeros(hidden_size)
		self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
		self.params['b3'] = np.zeros(output_size)

		self.layers = OrderedDict()
		self.layers['Conv1'] = Convolution(self.params['W1'],
										   self.params['b1'],
										   filter_stride,
										   filter_pad)

		self.layers['Relu'] = Relu()
		self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
		# BUG FIX: was `self.paramas['b2']` (typo) -> AttributeError at runtime.
		self.layers['Affine1'] = Affine(self.params['W2'],
										self.params['b2'])
		self.layers['Relu2'] = Relu()
		self.layers['Affine2'] = Affine(self.params['W3'],
										self.params['b3'])
		self.last_layer = SoftmaxWithLoss()
示例#9
0
    def main_graph(self, trained_model, scope, emb_dim, gru, rnn_dim, rnn_num, drop_out=0.5, rad_dim=30, emb=None, ng_embs=None, pixels=None, con_width=None, filters=None, pooling_size=None):
        """Build the tagger's main computation graph, one sub-graph per bucket.

        Optionally pickles all hyper-parameters to `trained_model`, creates the
        shared embedding / convolution / RNN components, then unrolls a BiRNN +
        output projection for every sentence-length bucket in self.buckets_char.

        NOTE(review): Python 2 / pre-1.0 TensorFlow idioms throughout
        (print statement, tf.unpack/tf.pack, tf.concat(axis, values)).

        Args:
            trained_model: path to dump the hyper-parameter pickle, or None.
            scope: variable scope whose variables are reused from bucket 2 on.
            emb_dim: character embedding dimension.
            gru: if True use GRU cells, else LSTM cells.
            rnn_dim: hidden size of each RNN direction.
            rnn_num: number of stacked RNN layers.
            drop_out: dropout keep/drop rate fed via a placeholder.
            rad_dim: radical embedding dimension.
            emb, ng_embs: optional pretrained embeddings (chars, n-grams).
            pixels, con_width, filters, pooling_size: glyph-CNN settings,
                required when self.graphic is set.
        """
        if trained_model is not None:
            # Persist every hyper-parameter needed to rebuild this graph later.
            param_dic = {}
            param_dic['nums_chars'] = self.nums_chars
            param_dic['nums_tags'] = self.nums_tags
            param_dic['tag_scheme'] = self.tag_scheme
            param_dic['graphic'] = self.graphic
            param_dic['pic_size'] = self.pic_size
            param_dic['word_vec'] = self.word_vec
            param_dic['radical'] = self.radical
            param_dic['crf'] = self.crf
            param_dic['emb_dim'] = emb_dim
            param_dic['gru'] = gru
            param_dic['rnn_dim'] = rnn_dim
            param_dic['rnn_num'] = rnn_num
            param_dic['drop_out'] = drop_out
            param_dic['filter_size'] = con_width
            param_dic['filters'] = filters
            param_dic['pooling_size'] = pooling_size
            param_dic['font'] = self.font
            param_dic['buckets_char'] = self.buckets_char
            param_dic['ngram'] = self.ngram
            #print param_dic
            f_model = open(trained_model, 'w')
            pickle.dump(param_dic, f_model)
            f_model.close()

        # define shared weights and variables

        # Dropout rate comes in through a placeholder so it can be switched
        # off at inference time.
        dr = tf.placeholder(tf.float32, [], name='drop_out_holder')
        self.drop_out = dr
        self.drop_out_v = drop_out

        if self.word_vec:
            # +500 slack ids beyond the known character vocabulary.
            self.emb_layer = EmbeddingLayer(self.nums_chars + 500, emb_dim, weights=emb, name='emb_layer')

        if self.radical:
            # 216 is the size of the radical inventory used here.
            self.radical_layer = EmbeddingLayer(216, rad_dim, name='radical_layer')

        if self.ngram is not None:
            if ng_embs is not None:
                assert len(ng_embs) == len(self.ngram)
            else:
                ng_embs = [None for _ in range(len(self.ngram))]
            for i, n_gram in enumerate(self.ngram):
                self.gram_layers.append(EmbeddingLayer(n_gram + 1000 * (i + 2), emb_dim, weights=ng_embs[i], name= str(i + 2) + 'gram_layer'))

        wrapper_conv_1, wrapper_mp_1, wrapper_conv_2, wrapper_mp_2, wrapper_dense, wrapper_dr = None, None, None, None, None, None

        if self.graphic:
            # Glyph pathway: two conv+maxpool stages over character images,
            # then a dense projection with dropout, all time-distributed.
            self.input_p = []
            assert pixels is not None and filters is not None and pooling_size is not None and con_width is not None

            self.pixels = pixels
            # Glyph images are assumed square: side = sqrt(flattened length).
            pixel_dim = int(math.sqrt(len(pixels[0])))

            wrapper_conv_1 = TimeDistributed(Convolution(con_width, 1, filters, name='conv_1'), name='wrapper_c1')
            wrapper_mp_1 = TimeDistributed(Maxpooling(pooling_size, pooling_size, name='pooling_1'), name='wrapper_p1')

            p_size_1 = toolbox.down_pool(pixel_dim, pooling_size)

            wrapper_conv_2 = TimeDistributed(Convolution(con_width, filters, filters, name='conv_2'), name='wrapper_c2')
            wrapper_mp_2 = TimeDistributed(Maxpooling(pooling_size, pooling_size, name='pooling_2'), name='wrapper_p2')

            p_size_2 = toolbox.down_pool(p_size_1, pooling_size)

            wrapper_dense = TimeDistributed(HiddenLayer(p_size_2 * p_size_2 * filters, 100, activation='tanh', name='conv_dense'), name='wrapper_3')
            wrapper_dr = TimeDistributed(DropoutLayer(self.drop_out), name='wrapper_dr')

        with tf.variable_scope('BiRNN'):

            if gru:
                fw_rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_dim)
                bw_rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_dim)
            else:
                fw_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_dim, state_is_tuple=True)
                bw_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_dim, state_is_tuple=True)

            if rnn_num > 1:
                fw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell([fw_rnn_cell]*rnn_num, state_is_tuple=True)
                bw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell([bw_rnn_cell]*rnn_num, state_is_tuple=True)

        # rnn_dim * 2 because forward and backward outputs are concatenated.
        output_wrapper = TimeDistributed(HiddenLayer(rnn_dim * 2, self.nums_tags[0], activation='linear', name='hidden'), name='wrapper')

        #define model for each bucket
        for idx, bucket in enumerate(self.buckets_char):
            if idx == 1:
                # All buckets after the first share the same variables.
                scope.reuse_variables()
            t1 = time()

            input_v = tf.placeholder(tf.int32, [None, bucket], name='input_' + str(bucket))

            self.input_v.append([input_v])

            # Collect every embedding stream; they are concatenated below.
            emb_set = []

            if self.word_vec:
                word_out = self.emb_layer(input_v)
                emb_set.append(word_out)

            if self.radical:
                input_r = tf.placeholder(tf.int32, [None, bucket], name='input_r' + str(bucket))

                self.input_v[-1].append(input_r)
                radical_out = self.radical_layer(input_r)
                emb_set.append(radical_out)

            if self.ngram is not None:
                for i in range(len(self.ngram)):
                    input_g = tf.placeholder(tf.int32, [None, bucket], name='input_g' + str(i) + str(bucket))
                    self.input_v[-1].append(input_g)
                    gram_out = self.gram_layers[i](input_g)
                    emb_set.append(gram_out)

            if self.graphic:
                # Run the glyph CNN pathway and append its features.
                input_p = tf.placeholder(tf.float32, [None, bucket, pixel_dim*pixel_dim])
                self.input_p.append(input_p)

                pix_out = tf.reshape(input_p, [-1, bucket, pixel_dim, pixel_dim, 1])
                pix_out = tf.unpack(pix_out, axis=1)

                conv_out_1 = wrapper_conv_1(pix_out)
                pooling_out_1 = wrapper_mp_1(conv_out_1)

                conv_out_2 = wrapper_conv_2(pooling_out_1)
                pooling_out_2 = wrapper_mp_2(conv_out_2)

                assert p_size_2 == pooling_out_2[0].get_shape().as_list()[1]
                pooling_out = tf.reshape(pooling_out_2, [-1, bucket, p_size_2 * p_size_2 * filters])
                pooling_out = tf.unpack(pooling_out, axis=1)

                graphic_out = wrapper_dense(pooling_out)
                graphic_out = wrapper_dr(graphic_out)

                emb_set.append(graphic_out)


            if len(emb_set) > 1:
                # Old TF signature: tf.concat(axis, values) — axis 2 = features.
                emb_out = tf.concat(2, emb_set)
                emb_out = tf.unpack(emb_out)

            else:
                emb_out = emb_set[0]

            rnn_out = BiLSTM(rnn_dim, fw_cell=fw_rnn_cell, bw_cell=bw_rnn_cell, p=dr, name='BiLSTM' + str(bucket), scope='BiRNN')(emb_out, input_v)

            output = output_wrapper(rnn_out)

            output_c = tf.pack(output, axis=1)

            self.output.append([output_c])

            self.output_.append([tf.placeholder(tf.int32, [None, bucket], name='tags' + str(bucket))])

            self.bucket_dit[bucket] = idx

            print 'Bucket %d, %f seconds' % (idx + 1, time() - t1)

        assert len(self.input_v) == len(self.output) and len(self.output) == len(self.output_) and len(self.output) == len(self.counts)

        self.params = tf.trainable_variables()

        self.saver = tf.train.Saver()
示例#10
0
    def __init__(self,
                 input_dim,
                 conv_params=None,
                 hidden_size=128,
                 dropout_ratio=(0.2, 0.5),
                 output_size=5):
        """Multi-conv CNN: (Conv -> ReLU) * n -> Affine -> ReLU -> Affine -> Softmax loss.

        Args:
            input_dim: input shape as (channels, side, ...), square images assumed.
            conv_params: list of dicts with keys 'filter_num', 'filter_size',
                'pad', 'stride' — one per conv layer.
            hidden_size: width of the hidden affine layer.
            dropout_ratio: accepted for interface compatibility; NOTE(review)
                it is not used anywhere in this constructor.
            output_size: number of output classes.
        """
        # BUG FIX: mutable default arguments (lists) are shared across all
        # instances; build the conv_params default per call and use an
        # immutable tuple for dropout_ratio.
        if conv_params is None:
            conv_params = [
                {
                    'filter_num': 32,
                    'filter_size': 9,
                    'pad': 0,
                    'stride': 3
                },
                {
                    'filter_num': 64,
                    'filter_size': 5,
                    'pad': 2,
                    'stride': 1
                },
                {
                    'filter_num': 128,
                    'filter_size': 7,
                    'pad': 0,
                    'stride': 1
                },
            ]
        self.params = {}
        self.layers = {}
        pre_shape = input_dim
        for idx, conv_param in enumerate(conv_params):
            # He initialisation scaled by the fan-in of this conv layer.
            self.params['W' + str(idx + 1)] = init_he(pre_shape[0] * conv_param['filter_size']**2) *\
                np.random.randn(
                    conv_param['filter_num'],
                    pre_shape[0],
                    conv_param['filter_size'],
                    conv_param['filter_size'])
            self.params['b' + str(idx + 1)] = np.zeros(
                conv_param['filter_num'])

            # set layers
            self.layers['Conv' + str(idx + 1)] = Convolution(
                self.params['W' + str(idx + 1)],
                self.params['b' + str(idx + 1)], conv_param['stride'],
                conv_param['pad'])
            self.layers['Relu' + str(idx + 1)] = Relu()

            # Track the image shape produced by this conv layer for the next one.
            pre_shape = self.layers['Conv' +
                                    str(idx + 1)].output_size(pre_shape)

        idx = len(conv_params)

        # Hidden affine layer over the flattened conv output.
        self.params['W' + str(idx + 1)] = init_he(pre_shape[0] * pre_shape[1]**2) *\
            np.random.randn(pre_shape[0] * pre_shape[1]**2, hidden_size)
        self.params['b' + str(idx + 1)] = np.zeros(hidden_size)
        self.layers['Affine' + str(idx + 1)] = Affine(
            self.params['W' + str(idx + 1)], self.params['b' + str(idx + 1)])
        self.layers['Relu' + str(idx + 1)] = Relu()
        idx += 1

        # Output affine layer.
        self.params['W' +
                    str(idx + 1)] = init_he(hidden_size) * np.random.randn(
                        hidden_size, output_size)
        self.params['b' + str(idx + 1)] = np.zeros(output_size)
        self.layers['Affine' + str(idx + 1)] = Affine(
            self.params['W' + str(idx + 1)], self.params['b' + str(idx + 1)])

        # set loss function layer
        self.loss_layer = SoftmaxWithLoss()
示例#11
0
# Train a small CNN on MNIST with the framework's Sequential API.
# NOTE(review): num_classes, skml_config, convert_to_one_hot and the layer
# classes come from elsewhere in the project — confirm before reuse.
img_rows = 28
img_cols = 28
input_shape = (1, img_rows, img_cols)  # channels-first (C, H, W)
(train_x, train_y), (test_x, test_y) = mnist.load_data()
# Reshape to NCHW and cast to the framework's input dtype.
train_x = np.reshape(train_x, (len(train_x), 1, img_rows, img_cols)).astype(skml_config.config.i_type)
train_y = convert_to_one_hot(train_y, num_classes)
test_x = np.reshape(test_x, (len(test_x), 1, img_rows, img_cols)).astype(skml_config.config.i_type)
test_y = convert_to_one_hot(test_y, num_classes)

# Hold out part of the training set for validation.
train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y)


filters = 64
# Conv-BN-ReLU x2 with max-pooling, then global average pooling + classifier.
model = Sequential()
model.add(Convolution(filters, 3, input_shape=input_shape))
model.add(BatchNormalization())
model.add(ReLU())
model.add(MaxPooling(2))
model.add(Convolution(filters, 3))
model.add(BatchNormalization())
model.add(ReLU())
model.add(GlobalAveragePooling())
model.add(Affine(num_classes))
model.compile(SoftmaxCrossEntropy(), Adam())

train_batch_size = 100
valid_batch_size = 1
# The two prints log training start/end timestamps (messages are in Japanese).
print("訓練開始: {}".format(datetime.now().strftime("%Y/%m/%d %H:%M")))
model.fit(train_x, train_y, train_batch_size, 20, validation_data=(valid_batch_size, valid_x, valid_y), validation_steps=1)
print("訓練終了: {}".format(datetime.now().strftime("%Y/%m/%d %H:%M")))
示例#12
0
# coding: utf-8
from layers import (Convolution, FullyConnect, Flatten)
from activator import ReLU, Sigmoid
from pool import MaxPool
from nn import Module
from temp_data_utils import Minist
from loss_function import SoftmaxEntorpy, MeanSquareError

# Hyper-parameters for the hand-rolled CNN training loop below.
batch_size = 16
learning_rate = 1e-3  # NOTE(review): defined but not used in this visible chunk
turns = 1000

# Network definition: 3 conv stages (the first two max-pooled), then a
# fully-connected classifier over the flattened 288 features.
l = [
    Convolution((16, 1, 5, 5)),
    MaxPool(),
    ReLU(),
    Convolution((32, 16, 3, 3)),
    MaxPool(),
    ReLU(),
    Convolution((32, 32, 3, 3)),
    ReLU(),
    Flatten(),
    FullyConnect((288, 10))
]

minist = Minist()
module = Module(l, SoftmaxEntorpy())

for i in range(turns):
    datas, labels = minist.next_batch(batch_size)
    # Scale pixel values to [0, 1].
    datas = datas / 255.0
    # NOTE(review): the snippet appears truncated here — the forward/backward
    # step that consumes datas/labels is not visible in this chunk.
示例#13
0
from keras import optimizers
from keras import backend as K

import warnings
warnings.filterwarnings('ignore')

# Random input batch: N=10, C=3, H=30, W=30 (channels-first).
inputs = np.random.uniform(size=(10, 3, 30, 30))
params = {
    'kernel_h': 5,
    'kernel_w': 5,
    'pad': 0,
    'stride': 2,
    'in_channel': inputs.shape[1],
    'out_channel': 64,
}
# Our own Convolution layer, run forward once.
layer = Convolution(params)
out = layer.forward(inputs)

# Build the equivalent Keras Conv2D for a numerical comparison.
keras_model = keras.Sequential()
keras_layer = layers.Conv2D(filters=params['out_channel'],
                            kernel_size=(params['kernel_h'],
                                         params['kernel_w']),
                            strides=(params['stride'], params['stride']),
                            padding='valid',
                            data_format='channels_first',
                            input_shape=inputs.shape[1:])
keras_model.add(keras_layer)
sgd = optimizers.SGD(lr=0.01)
# BUG FIX: pass the configured optimizer instance — the string 'sgd' created
# a fresh default optimizer and silently ignored lr=0.01 above.
keras_model.compile(loss='mean_squared_error', optimizer=sgd)
# Our weights are (out_ch, in_ch, kh, kw); Keras expects (kh, kw, in_ch, out_ch).
weights = np.transpose(layer.weights, (2, 3, 1, 0))
keras_layer.set_weights([weights, layer.bias])
# CNN definition for MNIST using a small theano-backed framework.
from network import Network
from layers import Relu, Softmax, Linear, Convolution, Pooling, Flatten
from loss import CrossEntropyLoss
from optimizer import SGDOptimizer, AdagradOptimizer
from solve_net import solve_net
from mnist import load_mnist_for_cnn

import theano.tensor as T

batch_size = 50  ###batchsize=32, test will get 16 left, results in error

train_data, test_data, train_label, test_label = load_mnist_for_cnn('data')
model = Network()
# Convolution(name, kernel, in_ch, out_ch, init_std, in_size, batch_size).
model.add(Convolution('conv1', 5, 1, 8, 0.1, 28,
                      batch_size))  # output size: N x 8 x 24 x 24 (28-5+1)
model.add(Relu('relu1'))
model.add(Pooling('pool1', 2))  # output size: N x 8 x 12 x 12
model.add(Convolution('conv2', 5, 8, 16, 0.1, 12,
                      batch_size))  # output size: N x 16 x 8 x 8 (12-5+1)
model.add(Relu('relu2'))
model.add(Pooling('pool2', 2))  # output size: N x 16 x 4 x 4
model.add(Flatten('View'))  # my own layer, to reshape the output
model.add(Linear('fc3', 16 * 4 * 4, 10,
                 0.1))  # input reshaped to N x 256 in Linear layer
model.add(Softmax('softmax'))

loss = CrossEntropyLoss(name='xent')

optim = AdagradOptimizer(learning_rate=0.0001, eps=1e-8)

# 4-D float32 placeholder for image batches (N, C, H, W).
input_placeholder = T.ftensor4('input')
    def build_model(self, inputs, is_training):
        """Assemble the I3D (Inflated 3D Inception) graph.

        Builds layers up to ``self._final_endpoint`` and returns a tuple
        ``(output_tensor, end_points, layers)`` where ``end_points`` maps
        endpoint names to tensors and ``layers`` maps a running index to
        the layer objects created.

        :param inputs: 5-D video tensor (batch, time, height, width, channels
            — assumed from the NDHWC pool kernels below; TODO confirm).
        :param is_training: flag forwarded to batch-norm inside Convolution.
        :raises ValueError: if ``self._final_endpoint`` is not a known endpoint.
        """
        if self._final_endpoint not in self._endpoints:
            raise ValueError("Unknown final endpoint {}".format(self._final_endpoint))

        # BUG FIX: this assignment was indented with a tab while the rest of
        # the method uses spaces, which raises TabError in Python 3.
        net = inputs
        end_points = {}
        layers = {}
        idx = 0

        def inception_block(net, is_training, end_point, layers_config, layers, idx):
            # Standard Inception branch layout: 1x1 | 1x1->3x3 | 1x1->3x3 | pool->1x1,
            # concatenated along the channel axis.
            with tf.variable_scope(end_point):
                with tf.variable_scope('Branch_0'):
                    # 1x1x1 Conv, stride 1
                    layers[idx] = Convolution(net, 'Conv3d_0a_1x1', layers_config['Branch_0'][0],
                        kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_0 = layers[idx].forward(); idx += 1

                with tf.variable_scope('Branch_1'):
                    # 1x1x1 Conv, stride 1
                    layers[idx] = Convolution(net, 'Conv3d_0a_1x1', layers_config['Branch_1'][0],
                        kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_1 = layers[idx].forward(); idx += 1
                    # 3x3x3 Conv, stride 1
                    layers[idx] = Convolution(branch_1, 'Conv3d_0b_3x3', layers_config['Branch_1'][1],
                        kernel_size=3, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_1 = layers[idx].forward(); idx += 1

                with tf.variable_scope('Branch_2'):
                    # 1x1x1 Conv, stride 1
                    layers[idx] = Convolution(net, 'Conv3d_0a_1x1', layers_config['Branch_2'][0],
                        kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_2 = layers[idx].forward(); idx += 1
                    # 3x3x3 Conv, stride 1
                    layers[idx] = Convolution(branch_2, 'Conv3d_0b_3x3', layers_config['Branch_2'][1],
                        kernel_size=3, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_2 = layers[idx].forward(); idx += 1

                with tf.variable_scope('Branch_3'):
                    # 3x3x3 Max-pool, stride 1, 1, 1
                    layers[idx] = Pooling(net, 'MaxPool3d_0a_3x3',
                        kernel_size=layers_config['Branch_3'][0], strides=layers_config['Branch_3'][1],
                        padding='SAME', pool='MAX')
                    branch_3 = layers[idx].forward(); idx += 1
                    # 1x1x1 Conv, stride 1
                    layers[idx] = Convolution(branch_3, 'Conv3d_0b_1x1', layers_config['Branch_3'][2],
                        kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                    branch_3 = layers[idx].forward(); idx += 1

                # Concat branch_[0-3] along the channel axis (axis 4 of NDHWC).
                net = tf.concat([branch_0, branch_1, branch_2, branch_3], 4)

            return net, idx

        print('Inputs: {}'.format(net.get_shape().as_list()))

        with tf.variable_scope('RGB'):
            with tf.variable_scope('inception_i3d'):
                # 7x7x7 Conv, stride 2
                end_point = 'Conv3d_1a_7x7'
                layers[idx] = Convolution(net, end_point, 64,
                    kernel_size=7, stride=2, is_training=is_training, num_cores=self._num_cores,
                    use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                net = layers[idx].forward(); idx += 1
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # 1x3x3 Max-pool, stride 1, 2, 2
                end_point = 'MaxPool3d_2a_3x3'
                layers[idx] = Pooling(net, end_point, kernel_size=[1, 1, 3, 3, 1],
                    strides=[1, 1, 2, 2, 1], padding='SAME', pool='MAX')
                net = layers[idx].forward(); idx += 1
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # 1x1x1 Conv, stride 1
                end_point = 'Conv3d_2b_1x1'
                layers[idx] = Convolution(net, end_point, 64,
                    kernel_size=1, stride=1, is_training=is_training, num_cores=self._num_cores,
                    use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                net = layers[idx].forward(); idx += 1
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # 3x3x3 Conv, stride 1
                end_point = 'Conv3d_2c_3x3'
                layers[idx] = Convolution(net, end_point, 192,
                    kernel_size=3, stride=1, is_training=is_training, num_cores=self._num_cores,
                    use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm)
                net = layers[idx].forward(); idx += 1
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # 1x3x3 Max-pool, stride 1, 2, 2
                end_point = 'MaxPool3d_3a_3x3'
                layers[idx] = Pooling(net, end_point, kernel_size=[1, 1, 3, 3, 1],
                    strides=[1, 1, 2, 2, 1], padding='SAME', pool='MAX')
                net = layers[idx].forward(); idx += 1
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 3b : Inception block
                end_point = 'Mixed_3b'
                layers_config = {
                    'Branch_0': [64],
                    'Branch_1': [96, 128],
                    'Branch_2': [16, 32],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 32]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 3c: Inception block
                end_point = 'Mixed_3c'
                layers_config = {
                    'Branch_0': [128],
                    'Branch_1': [128, 192],
                    'Branch_2': [32, 96],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 64]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # 3x3x3 Max-pool, stride 2, 2, 2
                end_point = 'MaxPool3d_4a_3x3'
                layers[idx] = Pooling(net, end_point, kernel_size=[1, 3, 3, 3, 1],
                    strides=[1, 2, 2, 2, 1], padding='SAME', pool='MAX')
                net = layers[idx].forward(); idx += 1
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 4b: Inception block
                end_point = 'Mixed_4b'
                layers_config = {
                    'Branch_0': [192],
                    'Branch_1': [96, 208],
                    'Branch_2': [16, 48],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 64]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 4c: Inception block
                end_point = 'Mixed_4c'
                layers_config = {
                    'Branch_0': [160],
                    'Branch_1': [112, 224],
                    'Branch_2': [24, 64],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 64]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 4d: Inception block
                end_point = 'Mixed_4d'
                layers_config = {
                    'Branch_0': [128],
                    'Branch_1': [128, 256],
                    'Branch_2': [24, 64],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 64]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 4e: Inception block
                end_point = 'Mixed_4e'
                layers_config = {
                    'Branch_0': [112],
                    'Branch_1': [144, 288],
                    'Branch_2': [32, 64],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 64]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 4f: Inception block
                end_point = 'Mixed_4f'
                layers_config = {
                    'Branch_0': [256],
                    'Branch_1': [160, 320],
                    'Branch_2': [32, 128],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 128]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # 2x2x2 Max-pool, stride 2x2x2
                end_point = 'MaxPool3d_5a_2x2'
                layers[idx] = Pooling(net, end_point, kernel_size=[1, 2, 2, 2, 1],
                    strides=[1, 2, 2, 2, 1], padding='SAME', pool='MAX')
                net = layers[idx].forward(); idx += 1
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 5b: Inception block
                end_point = 'Mixed_5b'
                layers_config = {
                    'Branch_0': [256],
                    'Branch_1': [160, 320],
                    'Branch_2': [32, 128],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 128]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Mixed 5c: Inception block
                end_point = 'Mixed_5c'
                layers_config = {
                    'Branch_0': [384],
                    'Branch_1': [192, 384],
                    'Branch_2': [48, 128],
                    'Branch_3': [[1, 3, 3, 3, 1], [1, 1, 1, 1, 1], 128]
                }
                net, idx = inception_block(net, is_training, end_point, layers_config, layers, idx)
                get_shape = net.get_shape().as_list()
                print('{} : {}'.format(end_point, get_shape))

                end_points[end_point] = net
                if self._final_endpoint == end_point: return net, end_points, layers

                # Logits
                end_point = 'Logits'
                with tf.variable_scope(end_point):
                    # 2x7x7 Average-pool, stride 1, 1, 1
                    layers[idx] = Pooling(net, 'Logits', kernel_size=[1, 2, 7, 7, 1],
                        strides=[1, 1, 1, 1, 1], padding='VALID', pool='AVG')
                    net = layers[idx].forward(); idx += 1
                    get_shape = net.get_shape().as_list()
                    print('{} / Average-pool3D: {}'.format(end_point, get_shape))
                    end_points[end_point + '_average_pool3d'] = net

                    # Dropout
                    net = tf.nn.dropout(net, self._keep)

                    # 1x1x1 Conv, stride 1 (no activation: raw class logits)
                    layers[idx] = Convolution(net, 'Conv3d_0c_1x1', self._num_classes,
                        kernel_size=1, stride=1, activation=None,
                        use_batch_norm=self._batch_norm, use_cross_replica_batch_norm=self._cross_replica_batch_norm,
                        is_training=is_training, num_cores=self._num_cores)
                    logits = layers[idx].forward(); idx += 1
                    get_shape = logits.get_shape().as_list()
                    print('{} / Conv3d_0c_1x1 : {}'.format(end_point, get_shape))

                    if self._spatial_squeeze:
                        # Removes dimensions of size 1 from the shape of a tensor
                        # Specify which dimensions have to be removed: 2 and 3
                        logits = tf.squeeze(logits, [2, 3], name='SpatialSqueeze')
                        get_shape = logits.get_shape().as_list()
                        print('{} / Spatial Squeeze : {}'.format(end_point, get_shape))

                # Average the per-frame logits over the time axis.
                averaged_logits = tf.reduce_mean(logits, axis=1)
                get_shape = averaged_logits.get_shape().as_list()
                print('{} / Averaged Logits : {}'.format(end_point, get_shape))

                end_points[end_point] = averaged_logits
                if self._final_endpoint == end_point: return averaged_logits, end_points, layers

                # Predictions
                end_point = 'Predictions'
                predictions = tf.nn.softmax(
                    averaged_logits)
                end_points[end_point] = predictions
                return predictions, end_points, layers

    # BUG FIX: the two methods below were nested inside build_model (dead
    # code, never callable as methods) — dedented to class level.
    def loss_function(self, label_placeholder, output_logits, **kwargs):
        """Softmax cross-entropy loss with label smoothing from kwargs."""
        # BUG FIX: original referenced an undefined name `one_hot_labels`;
        # use the label placeholder (assumed already one-hot, consistent
        # with the argmax in performance_metric — TODO confirm with callers).
        self._loss = tf.losses.softmax_cross_entropy(
            logits=output_logits,
            onehot_labels=label_placeholder,
            label_smoothing=kwargs['label_smoothing']
        )

        return self._loss

    def performance_metric(self, label_placeholder, output_logits, **kwargs):
        """Top-1 accuracy between predicted and one-hot ground-truth labels."""
        predictions = tf.argmax(output_logits, axis=1)
        ground_truth = tf.argmax(label_placeholder, axis=1)
        correct = tf.equal(predictions, ground_truth)
        self._perf = tf.reduce_mean(tf.cast(correct, tf.float32))

        return self._perf
示例#16
0
        'Specify the type of neural network: cnn, mlp and whether to train or evaluate'
    )
# First CLI argument selects the network type ('mlp' or 'cnn').
neural_network_type = sys.argv[1]

if neural_network_type == 'mlp':
    hyperparameters = {
        'architecture': (Dense(1024), ReLU(), Dense(256), ReLU(),
                         Dense(10)),  # NOTE(review): original comment said
                                      # "1024 because 32x32 for cifar10", but
                                      # sample_shape below is (784,) = 28x28
                                      # MNIST — confirm intended dataset
        'epsilon': 1e-6,
        'lr': 5e-2,
        'batch_size': 64,
        'n_epochs': 100
    }
    sample_shape = (784, )  # flat 28x28 input vector
elif neural_network_type == 'cnn':
    # Three 3x3 conv(32)+ReLU stages, then flatten into a 10-way head.
    architecture = (Convolution(3, 32), ReLU(), Convolution(3, 32), ReLU(),
                    Convolution(3, 32), ReLU(), Flatten(), Dense(10))

    hyperparameters = {
        'architecture': architecture,
        'epsilon': 1e-6,
        'lr': 5e-2,
        'batch_size': 64,
        'n_epochs': 1
    }
    sample_shape = (28, 28, 1)  # single-channel 28x28 image
else:
    raise ValueError('Unknown neural network type')

if sys.argv[2] == 'evaluate':
    if len(sys.argv) < 3:
示例#17
0
        ),
        'epsilon': 1e-6,
        'lr': 5e-2,
        'batch_size': 64,
        'n_epochs': 1,
    },
}

# Architecture for the CNN configuration: three 3x3 conv(32) + ReLU
# stages followed by a flatten and a 10-way dense classifier head.
_cnn_architecture = (
    Convolution(3, 32), ReLU(),
    Convolution(3, 32), ReLU(),
    Convolution(3, 32), ReLU(),
    Flatten(),
    Dense(10),
)

# Training configuration for the CNN on MNIST (28x28 single-channel).
cnn_config = {
    'nn_type': 'cnn',
    'dataset_name': 'mnist',
    'eval_while_training': False,
    'mnist_sample_shape': (28, 28, 1),
    'hyperparameters': {
        'architecture': _cnn_architecture,
        'epsilon': 1e-6,
        'lr': 5e-2,
        'batch_size': 64,
        'n_epochs': 1,
    },
}
示例#18
0
# Second CNN definition for MNIST using the theano-backed framework
# (float32 everywhere so GPU execution is possible).
from network import Network
from layers import Relu, Softmax, Linear, Convolution, Pooling
from loss import CrossEntropyLoss
from optimizer import SGDOptimizer
from optimizer import AdagradOptimizer
from solve_net import solve_net
from mnist import load_mnist_for_cnn

import theano.tensor as T
import theano

theano.config.floatX = 'float32'

train_data, test_data, train_label, test_label = load_mnist_for_cnn('data')
model = Network()
# Convolution(name, kernel, in_channels, out_channels, init_std).
model.add(Convolution('conv1', 5, 1, 8, 0.01))  # output size: N x 8 x 24 x 24 (28-5+1)
model.add(Relu('relu1'))
model.add(Pooling('pool1', 2))  # output size: N x 8 x 12 x 12
model.add(Convolution('conv2', 3, 8, 12, 0.01))  # output size: N x 12 x 10 x 10 (12-3+1)
model.add(Relu('relu2'))
model.add(Pooling('pool2', 2))  # output size: N x 12 x 5 x 5
model.add(Linear('fc3', 12 * 5 * 5, 10,
                 0.01))  # input reshaped to N x 300 in Linear layer
model.add(Softmax('softmax'))

loss = CrossEntropyLoss(name='xent')

# optim = SGDOptimizer(learning_rate=0.0001, weight_decay=0.005, momentum=0.9)
optim = AdagradOptimizer(learning_rate=0.0002, eps=1e-5)

# 4-D float32 placeholder for image batches (N, C, H, W).
input_placeholder = T.ftensor4('input')
示例#19
0
def test_cnn(test_image_name):
    """Run the trained 9-layer super-resolution CNN on one image file.

    Loads the pickled per-layer (weights, bias) pairs from 'params.pkl',
    rebuilds the conv stack, forwards the image through it, and writes the
    result next to the input as '<name>_sr<height>.<ext>'.

    :param test_image_name: path to the input image.
    """
    test_img = mpimg.imread(test_image_name)
    # BUG FIX: close the pickle file deterministically (the original
    # leaked the open handle).
    with open('params.pkl', 'rb') as pkl_file:
        params = pickle.load(pkl_file)
    # BUG FIX: rsplit with maxsplit=1 yields the real extension even for
    # dotted names ("a.b.jpg" -> "jpg"; the original returned "b").
    # Also renamed: `type` shadowed the builtin.
    ext = test_image_name.rsplit(".", 1)[1]

    h, w, ip_channels = test_img.shape
    out_channels = 64
    filter_size = 3
    padding = 1

    # Build the conv stack from the pickled parameters instead of nine
    # copy-pasted layer blocks. First layer maps image channels -> 64,
    # last layer maps 64 -> image channels with no activation; all
    # intermediate layers are 64 -> 64 followed by ReLU.
    n_layers = len(params)  # historically 9
    layers = []
    for i, layer_params in enumerate(params):
        in_ch = ip_channels if i == 0 else out_channels
        out_ch = ip_channels if i == n_layers - 1 else out_channels
        layer = Convolution(in_ch, out_ch, filter_size, padding)
        layer.weights = layer_params[0]
        layer.bias = layer_params[1]
        activation = ReLU() if i < n_layers - 1 else None
        layers.append((layer, activation))

    cnn_obj = CNN(layers)
    out_img = cnn_obj.forward(test_img)
    # Same naming scheme as before: full input name + "_sr<height>.<ext>".
    output_file_name = test_image_name + "_sr" + str(h) + "." + ext
    cv2.imwrite(output_file_name, out_img)
示例#20
0
# Visualize the responses of the first conv layer's 16 learned filters
# by convolving each one with a grayscale test image.
network = SimpleConvNet(input_dim=(1,28,28), 
                        conv_param = {'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
                        hidden_size=100, output_size=10, weight_init_std=0.01)

# Weights after training.
network.load_params("params.pkl")

filter_show(network.params['W1'], 16)

img = imread('../data/lena_gray.png')
# Add batch and channel axes: (H, W) -> (1, 1, H, W).
img = img.reshape(1, 1, *img.shape)

fig = plt.figure()

w_idx = 1  # NOTE(review): unused below — presumably leftover from an older loop

for i in range(16):
    w = network.params['W1'][i]
    b = 0  # network.params['b1'][i]

    # Re-add the filter-count axis so Convolution sees a (1, C, kh, kw) kernel.
    w = w.reshape(1, *w.shape)
    #b = b.reshape(1, *b.shape)
    conv_layer = Convolution(w, b) 
    out = conv_layer.forward(img)
    # Drop the batch/channel axes for display: (1, 1, H', W') -> (H', W').
    out = out.reshape(out.shape[2], out.shape[3])
    
    ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
    ax.imshow(out, cmap=plt.cm.gray_r, interpolation='nearest')

plt.show()
示例#21
0
    def main_graph(self, trained_model, scope, emb_dim, gru, rnn_dim, rnn_num, drop_out=0.5, rad_dim=30, emb=None,
                   ngram_embedding=None, pixels=None, con_width=None, filters=None, pooling_size=None):
        """

        :param trained_model:
        :param scope:
        :param emb_dim:
        :param gru:
        :param rnn_dim:
        :param rnn_num:
        :param drop_out:
        :param rad_dim: n
        :param emb:
        :param ngram_embedding: 预训练 ngram embeddig 文件
        :param pixels:
        :param con_width:
        :param filters:
        :param pooling_size:
        :return:
        """
        # trained_model: 模型存储路径
        if trained_model is not None:
            param_dic = {'nums_chars': self.nums_chars, 'nums_tags': self.nums_tags, 'tag_scheme': self.tag_scheme,
                         'graphic': self.graphic, 'pic_size': self.pic_size, 'word_vec': self.word_vec,
                         'radical': self.radical, 'crf': self.crf, 'emb_dim': emb_dim, 'gru': gru, 'rnn_dim': rnn_dim,
                         'rnn_num': rnn_num, 'drop_out': drop_out, 'filter_size': con_width, 'filters': filters,
                         'pooling_size': pooling_size, 'font': self.font, 'buckets_char': self.buckets_char,
                         'ngram': self.ngram}
            print "RNN dimension is %d" % rnn_dim
            print "RNN number is %d" % rnn_num
            print "Character embedding size is %d" % emb_dim
            print "Ngram embedding dimension is %d" % emb_dim
            # 存储模型超参数
            if self.metric == 'All':
                # rindex() 返回子字符串 str 在字符串中最后出现的位置
                # 截取模型文件名
                pindex = trained_model.rindex('/') + 1
                for m in self.all_metrics:
                    f_model = open(trained_model[:pindex] + m + '_' + trained_model[pindex:], 'w')
                    pickle.dump(param_dic, f_model)
                    f_model.close()
            else:
                f_model = open(trained_model, 'w')
                pickle.dump(param_dic, f_model)
                f_model.close()

        # define shared weights and variables

        dr = tf.placeholder(tf.float32, [], name='drop_out_holder')
        self.drop_out = dr
        self.drop_out_v = drop_out

        # 字向量层
        # 为什么字符数要加 500 ?
        # emb_dim 是每个字符的特征向量维度,可以通过命令行参数设置
        # weights 表示预训练的字向量,可以通过命令行参数设置
        if self.word_vec:
            self.emb_layer = EmbeddingLayer(self.nums_chars + 500, emb_dim, weights=emb, name='emb_layer')

        # 偏旁部首向量
        # 依照《康熙字典》,共有 214 个偏旁部首。
        # 只用了常见汉字的偏旁部首,非常见汉字和非汉字的偏旁部首用其他两个特殊符号代替,
        # 所以共有 216 个偏旁部首
        if self.radical:
            self.radical_layer = EmbeddingLayer(216, rad_dim, name='radical_layer')

        if self.ngram is not None:
            if ngram_embedding is not None:
                assert len(ngram_embedding) == len(self.ngram)
            else:
                ngram_embedding = [None for _ in range(len(self.ngram))]
            for i, n_gram in enumerate(self.ngram):
                self.gram_layers.append(EmbeddingLayer(n_gram + 1000 * (i + 2), emb_dim, weights=ngram_embedding[i],
                                                       name=str(i + 2) + 'gram_layer'))

        wrapper_conv_1, wrapper_mp_1, wrapper_conv_2, wrapper_mp_2, wrapper_dense, wrapper_dr = \
            None, None, None, None, None, None

        if self.graphic:
            # 使用图像信息,需要用到 CNN
            self.input_p = []
            assert pixels is not None and filters is not None and pooling_size is not None and con_width is not None

            self.pixels = pixels
            pixel_dim = int(math.sqrt(len(pixels[0])))

            wrapper_conv_1 = TimeDistributed(Convolution(con_width, 1, filters, name='conv_1'), name='wrapper_c1')
            wrapper_mp_1 = TimeDistributed(Maxpooling(pooling_size, pooling_size, name='pooling_1'), name='wrapper_p1')

            p_size_1 = toolbox.down_pool(pixel_dim, pooling_size)

            wrapper_conv_2 = TimeDistributed(Convolution(con_width, filters, filters, name='conv_2'), name='wrapper_c2')
            wrapper_mp_2 = TimeDistributed(Maxpooling(pooling_size, pooling_size, name='pooling_2'), name='wrapper_p2')

            p_size_2 = toolbox.down_pool(p_size_1, pooling_size)

            wrapper_dense = TimeDistributed(
                HiddenLayer(p_size_2 * p_size_2 * filters, 100, activation='tanh', name='conv_dense'), name='wrapper_3')
            wrapper_dr = TimeDistributed(DropoutLayer(self.drop_out), name='wrapper_dr')

        with tf.variable_scope('BiRNN'):

            if gru:
                fw_rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_dim)
                bw_rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_dim)
            else:
                fw_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_dim, state_is_tuple=True)
                bw_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_dim, state_is_tuple=True)

            if rnn_num > 1:
                fw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell([fw_rnn_cell] * rnn_num, state_is_tuple=True)
                bw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell([bw_rnn_cell] * rnn_num, state_is_tuple=True)

        # 隐藏层,输入是前向 RNN 的输出加上 后向 RNN 的输出,所以输入维度为 rnn_dim * 2
        # 输出维度即标签个数
        output_wrapper = TimeDistributed(
            HiddenLayer(rnn_dim * 2, self.nums_tags[0], activation='linear', name='hidden'),
            name='wrapper')

        # define model for each bucket
        # 每一个 bucket 中的句子长度不一样,所以需要定义单独的模型
        # bucket: bucket 中的句子长度
        for idx, bucket in enumerate(self.buckets_char):
            if idx == 1:
                # scope 是 tf.variable_scope("tagger", reuse=None, initializer=initializer)
                # 只需要设置一次 reuse,后面就都 reuse 了
                scope.reuse_variables()
            t1 = time()

            # 输入的句子,one-hot 向量
            # shape = (batch_size, 句子长度)
            input_sentences = tf.placeholder(tf.int32, [None, bucket], name='input_' + str(bucket))

            self.input_v.append([input_sentences])

            emb_set = []

            if self.word_vec:
                # 根据 one-hot 向量查找对应的字向量
                # word_out: shape=(batch_size, 句子长度,字向量维度(64))
                word_out = self.emb_layer(input_sentences)
                emb_set.append(word_out)

            if self.radical:
                # 嵌入偏旁部首信息,shape = (batch_size, 句子长度)
                input_radicals = tf.placeholder(tf.int32, [None, bucket], name='input_r' + str(bucket))

                self.input_v[-1].append(input_radicals)
                radical_out = self.radical_layer(input_radicals)
                emb_set.append(radical_out)

            if self.ngram is not None:
                for i in range(len(self.ngram)):
                    input_g = tf.placeholder(tf.int32, [None, bucket], name='input_g' + str(i) + str(bucket))
                    self.input_v[-1].append(input_g)
                    gram_out = self.gram_layers[i](input_g)
                    emb_set.append(gram_out)

            if self.graphic:
                input_p = tf.placeholder(tf.float32, [None, bucket, pixel_dim * pixel_dim])
                self.input_p.append(input_p)

                pix_out = tf.reshape(input_p, [-1, bucket, pixel_dim, pixel_dim, 1])

                conv_out_1 = wrapper_conv_1(pix_out)
                pooling_out_1 = wrapper_mp_1(conv_out_1)

                conv_out_2 = wrapper_conv_2(pooling_out_1)
                pooling_out_2 = wrapper_mp_2(conv_out_2)

                assert p_size_2 == pooling_out_2[0].get_shape().as_list()[1]
                pooling_out = tf.reshape(pooling_out_2, [-1, bucket, p_size_2 * p_size_2 * filters])
                pooling_out = tf.unstack(pooling_out, axis=1)

                graphic_out = wrapper_dense(pooling_out)
                graphic_out = wrapper_dr(graphic_out)

                emb_set.append(graphic_out)

            if self.window_size > 1:

                padding_size = int(np.floor(self.window_size / 2))
                word_padded = tf.pad(word_out, [[0, 0], [padding_size, padding_size], [0, 0]], 'CONSTANT')

                Ws = []
                for q in range(1, self.window_size + 1):
                    Ws.append(tf.get_variable("W_%d" % q, shape=[q * emb_dim, self.filters_number]))
                b = tf.get_variable("b", shape=[self.filters_number])

                z = [None for _ in range(0, bucket)]

                for q in range(1, self.window_size + 1):
                    for i in range(padding_size, bucket + padding_size):
                        low = i - int(np.floor((q - 1) / 2))
                        high = i + int(np.ceil((q + 1) / 2))
                        x = word_padded[:, low, :]
                        for j in range(low + 1, high):
                            x = tf.concat(values=[x, word_padded[:, j, :]], axis=1)
                        z_iq = tf.tanh(tf.nn.xw_plus_b(x, Ws[q - 1], b))
                        if z[i - padding_size] is None:
                            z[i - padding_size] = z_iq
                        else:
                            z[i - padding_size] = tf.concat([z[i - padding_size], z_iq], axis=1)

                z = tf.stack(z, axis=1)
                values, indices = tf.nn.top_k(z, sorted=False, k=emb_dim)

                # highway layer
                X = tf.unstack(word_out, axis=1)
                Conv_X = tf.unstack(values, axis=1)
                X_hat = []
                W_t = tf.get_variable("W_t", shape=[emb_dim, emb_dim])
                b_t = tf.get_variable("b_t", shape=[emb_dim])
                for x, conv_x in zip(X, Conv_X):
                    T_x = tf.sigmoid(tf.nn.xw_plus_b(x, W_t, b_t))
                    X_hat.append(tf.multiply(conv_x, T_x) + tf.multiply(x, 1 - T_x))
                X_hat = tf.stack(X_hat, axis=1)
                emb_set.append(X_hat)
            if len(emb_set) > 1:
                # 各种字向量直接 concat 起来(字向量、偏旁部首、n-gram、图像信息等)
                emb_out = tf.concat(axis=2, values=emb_set)

            else:
                emb_out = emb_set[0]

            # rnn_out 是前向 RNN 的输出和后向 RNN 的输出 concat 之后的值
            rnn_out = BiLSTM(rnn_dim, fw_cell=fw_rnn_cell, bw_cell=bw_rnn_cell, p=dr,
                             name='BiLSTM' + str(bucket), scope='BiRNN')(self.highway(emb_out, "tag"), input_sentences)

            # 应用全连接层,Wx+b 得到最后的输出
            output = output_wrapper(rnn_out)
            # 为什么要 [output] 而不是 output 呢?
            self.output.append([output])

            self.output_.append([tf.placeholder(tf.int32, [None, bucket], name='tags' + str(bucket))])

            self.bucket_dit[bucket] = idx

            # language model
            lm_rnn_dim = rnn_dim
            with tf.variable_scope('LM-BiRNN'):
                if gru:
                    lm_fw_rnn_cell = tf.nn.rnn_cell.GRUCell(lm_rnn_dim)
                    lm_bw_rnn_cell = tf.nn.rnn_cell.GRUCell(lm_rnn_dim)
                else:
                    lm_fw_rnn_cell = tf.nn.rnn_cell.LSTMCell(lm_rnn_dim, state_is_tuple=True)
                    lm_bw_rnn_cell = tf.nn.rnn_cell.LSTMCell(lm_rnn_dim, state_is_tuple=True)

                if rnn_num > 1:
                    lm_fw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell([lm_fw_rnn_cell] * rnn_num, state_is_tuple=True)
                    lm_bw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell([lm_bw_rnn_cell] * rnn_num, state_is_tuple=True)
            lm_rnn_output = BiLSTM(lm_rnn_dim, fw_cell=lm_fw_rnn_cell,
                                   bw_cell=lm_bw_rnn_cell, p=dr,
                                   name='LM-BiLSTM' + str(bucket),
                                   scope='LM-BiRNN')(self.highway(emb_set[0]), input_sentences)

            lm_output_wrapper = TimeDistributed(
                HiddenLayer(lm_rnn_dim * 2, self.nums_chars + 2, activation='linear', name='lm_hidden'),
                name='lm_wrapper')
            lm_final_output = lm_output_wrapper(lm_rnn_output)
            self.lm_predictions.append([lm_final_output])
            self.lm_groundtruthes.append([tf.placeholder(tf.int32, [None, bucket], name='lm_targets' + str(bucket))])

            print 'Bucket %d, %f seconds' % (idx + 1, time() - t1)

        assert \
            len(self.input_v) == len(self.output) and \
            len(self.output) == len(self.output_) and \
            len(self.lm_predictions) == len(self.lm_groundtruthes) and \
            len(self.output) == len(self.counts)

        self.params = tf.trainable_variables()

        self.saver = tf.train.Saver()
示例#22
0
# Load CIFAR-10 with pixel values normalized (normalize=True); the ignored
# fifth return value is presumably label names / metadata — TODO confirm
# against load_cifar10's definition.
training_data, training_labels, test_data, test_labels, _ = load_cifar10(
    DATA_PATH, normalize=True)


def make_batch(data, label, size):
    """Yield consecutive (data, label) mini-batches of exactly `size` rows.

    A trailing partial batch (fewer than `size` rows) is dropped, matching
    the usual drop-last batching convention. `data` and `label` must
    support NumPy-style `[lo:hi, ...]` slicing.
    """
    full_batches = len(data) // size
    for batch_idx in range(full_batches):
        lo = batch_idx * size
        hi = lo + size
        yield data[lo:hi, ...], label[lo:hi, ...]


network = Network(
    Convolution(input_shape=(32, 32),
                input_depth=3,
                n_filters=32,
                filter_dim=(3, 3),
                stride=(1, 1),
                padding=((1, 1), (1, 1))), ReLU(), BatchNorm(),
    Convolution(input_shape=(32, 32),
                input_depth=32,
                n_filters=32,
                filter_dim=(3, 3),
                stride=(1, 1),
                padding=((1, 1), (1, 1))), ReLU(), BatchNorm(),
    MaxPooling(input_shape=(32, 32),
               input_depth=32,
               filter_dim=(2, 2),
               stride=(2, 2)), Dropout(rate=0.2),
    Convolution(input_shape=(16, 16),
                input_depth=32,
                n_filters=64,
示例#23
0
    def main_graph(self,
                   trained_model,
                   scope,
                   emb_dim,
                   gru,
                   rnn_dim,
                   rnn_num,
                   fnn_dim,
                   window_size,
                   drop_out=0.5,
                   rad_dim=30,
                   emb=None,
                   ng_embs=None,
                   pixels=None,
                   con_width=None,
                   filters=None,
                   pooling_size=None):
        """Build the TensorFlow 1.x graph for the sequence tagger.

        One sub-graph is constructed per sentence-length bucket in
        self.buckets_char; weights are shared across buckets via
        scope.reuse_variables() from the second bucket on.

        Args:
            trained_model: path to pickle the hyper-parameters to, or None
                to skip saving.
            scope: variable scope used to share weights between buckets.
            emb_dim: character-embedding dimension.
            gru: if True use GRU cells, otherwise LSTM cells.
            rnn_dim: hidden size of each RNN direction.
            rnn_num: number of stacked RNN layers.
            fnn_dim: hidden size of the convolutional FNN (non-RNN mode).
            window_size: half-window of the FNN convolution kernel.
            drop_out: dropout rate (fed via placeholder at run time).
            rad_dim: radical-embedding dimension.
            emb: optional pre-trained character-embedding weights.
            ng_embs: optional pre-trained n-gram embedding weights.
            pixels: flattened square glyph images (graphic mode only).
            con_width: convolution width of the glyph CNN (graphic mode).
            filters: number of filters of the glyph CNN (graphic mode).
            pooling_size: pooling size of the glyph CNN (graphic mode).
        """
        # Persist every hyper-parameter needed to rebuild this graph later.
        if trained_model is not None:
            param_dic = {}
            param_dic['nums_chars'] = self.nums_chars
            param_dic['nums_tags'] = self.nums_tags
            param_dic['tag_scheme'] = self.tag_scheme
            param_dic['graphic'] = self.graphic
            param_dic['pic_size'] = self.pic_size
            param_dic['word_vec'] = self.word_vec
            param_dic['radical'] = self.radical
            param_dic['crf'] = self.crf
            param_dic['emb_dim'] = emb_dim
            param_dic['gru'] = gru
            param_dic['rnn_dim'] = rnn_dim
            param_dic['rnn_num'] = rnn_num
            param_dic['fnn_dim'] = fnn_dim
            param_dic['window_size'] = window_size
            param_dic['drop_out'] = drop_out
            param_dic['filter_size'] = con_width
            param_dic['filters'] = filters
            param_dic['pooling_size'] = pooling_size
            param_dic['font'] = self.font
            param_dic['buckets_char'] = self.buckets_char
            param_dic['ngram'] = self.ngram
            param_dic['mode'] = self.mode
            #print param_dic
            # In 'All' mode, write one copy of the parameter pickle per
            # metric, prefixing the metric name to the file name.
            # NOTE(review): files are opened in text mode 'w' — fine on
            # Python 2 (this method uses `print` statements), but Python 3
            # would require 'wb' for pickle.
            if self.metric == 'All':
                pindex = trained_model.rindex('/') + 1
                for m in self.all_metrics:
                    f_model = open(
                        trained_model[:pindex] + m + '_' +
                        trained_model[pindex:], 'w')
                    pickle.dump(param_dic, f_model)
                    f_model.close()
            else:
                f_model = open(trained_model, 'w')
                pickle.dump(param_dic, f_model)
                f_model.close()

        # define shared weights and variables

        # Dropout rate is a scalar placeholder so it can be set per run
        # (e.g. drop_out_v for training, 0 for inference).
        dr = tf.placeholder(tf.float32, [], name='drop_out_holder')
        self.drop_out = dr
        self.drop_out_v = drop_out

        #concat_emb_dim = emb_dim * 2
        # Running total of the concatenated per-character feature width;
        # each enabled feature source below adds its dimension.
        concat_emb_dim = 0

        # Character-embedding lookup (+500 slack rows over the vocabulary).
        if self.word_vec:
            self.emb_layer = EmbeddingLayer(self.nums_chars + 500,
                                            emb_dim,
                                            weights=emb,
                                            name='emb_layer')
            concat_emb_dim += emb_dim

        # Radical (character-component) embeddings; 216 is the table size.
        if self.radical:
            self.radical_layer = EmbeddingLayer(216,
                                                rad_dim,
                                                name='radical_layer')
            concat_emb_dim += rad_dim

        # One embedding layer per n-gram order, optionally pre-initialized.
        if self.ngram is not None:
            if ng_embs is not None:
                assert len(ng_embs) == len(self.ngram)
            else:
                ng_embs = [None for _ in range(len(self.ngram))]
            for i, n_gram in enumerate(self.ngram):
                self.gram_layers.append(
                    EmbeddingLayer(n_gram + 1000 * (i + 2),
                                   emb_dim,
                                   weights=ng_embs[i],
                                   name=str(i + 2) + 'gram_layer'))
                concat_emb_dim += emb_dim

        wrapper_conv_1, wrapper_mp_1, wrapper_conv_2 = None, None, None
        wrapper_mp_2, wrapper_dense, wrapper_dr = None, None, None

        # Glyph-image pipeline: two conv+maxpool stages followed by a dense
        # projection to a fixed 100-dim feature, built once and applied to
        # every bucket below.
        if self.graphic:
            self.input_p = []
            assert pixels is not None and filters is not None and pooling_size is not None and con_width is not None

            self.pixels = pixels
            # Glyph images are flattened squares; recover the side length.
            pixel_dim = int(math.sqrt(len(pixels[0])))

            wrapper_conv_1 = Convolution(con_width, 1, filters, name='conv_1')
            wrapper_mp_1 = Maxpooling(pooling_size,
                                      pooling_size,
                                      name='pooling_1')

            # Spatial size after the first pooling stage.
            p_size_1 = toolbox.down_pool(pixel_dim, pooling_size)

            wrapper_conv_2 = Convolution(con_width,
                                         filters,
                                         filters,
                                         name='conv_2')
            wrapper_mp_2 = Maxpooling(pooling_size,
                                      pooling_size,
                                      name='pooling_2')
            # Spatial size after the second pooling stage.
            p_size_2 = toolbox.down_pool(p_size_1, pooling_size)

            wrapper_dense = HiddenLayer(p_size_2 * p_size_2 * filters,
                                        100,
                                        activation='tanh',
                                        name='conv_dense')
            wrapper_dr = DropoutLayer(self.drop_out)

            concat_emb_dim += 100

        fw_rnn_cell, bw_rnn_cell = None, None

        if self.mode == 'RNN':
            # Bidirectional RNN encoder: build the forward/backward cells
            # once; they are reused for every bucket.
            with tf.variable_scope('BiRNN'):

                if gru:
                    fw_rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_dim)
                    bw_rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_dim)
                else:
                    fw_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_dim,
                                                          state_is_tuple=True)
                    bw_rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_dim,
                                                          state_is_tuple=True)

                # NOTE(review): [cell] * rnn_num reuses one cell object for
                # every stacked layer; newer TF 1.x versions reject this
                # (each layer needs its own cell) — confirm the targeted
                # TF version tolerates it.
                if rnn_num > 1:
                    fw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(
                        [fw_rnn_cell] * rnn_num, state_is_tuple=True)
                    bw_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(
                        [bw_rnn_cell] * rnn_num, state_is_tuple=True)

            # Final projection: 2*rnn_dim (fw||bw concat) -> tag scores.
            output_wrapper = HiddenLayer(rnn_dim * 2,
                                         self.nums_tags[0],
                                         activation='linear',
                                         name='out_wrapper')
            fnn_weights, fnn_bias = None, None

        else:

            # FNN mode: a single conv filter spanning the full feature
            # width over a (2*window_size + 1)-character window.
            with tf.variable_scope('FNN'):
                fnn_weights = tf.get_variable(
                    'conv_w',
                    [2 * window_size + 1, concat_emb_dim, 1, fnn_dim])
                fnn_bias = tf.get_variable(
                    'conv_b', [fnn_dim],
                    initializer=tf.constant_initializer(0.1))

            output_wrapper = HiddenLayer(fnn_dim,
                                         self.nums_tags[0],
                                         activation='linear',
                                         name='out_wrapper')

        #define model for each bucket
        for idx, bucket in enumerate(self.buckets_char):
            # From the second bucket on, reuse all variables created while
            # building the first bucket's sub-graph.
            if idx == 1:
                scope.reuse_variables()
            t1 = time()

            # Character-id input, shape (batch, bucket length).
            input_v = tf.placeholder(tf.int32, [None, bucket],
                                     name='input_' + str(bucket))

            self.input_v.append([input_v])

            # Collect per-character feature tensors to concatenate.
            emb_set = []

            if self.word_vec:
                word_out = self.emb_layer(input_v)
                emb_set.append(word_out)

            if self.radical:
                input_r = tf.placeholder(tf.int32, [None, bucket],
                                         name='input_r' + str(bucket))

                self.input_v[-1].append(input_r)
                radical_out = self.radical_layer(input_r)
                emb_set.append(radical_out)

            if self.ngram is not None:
                for i in range(len(self.ngram)):
                    input_g = tf.placeholder(tf.int32, [None, bucket],
                                             name='input_g' + str(i) +
                                             str(bucket))
                    self.input_v[-1].append(input_g)
                    gram_out = self.gram_layers[i](input_g)
                    emb_set.append(gram_out)

            if self.graphic:
                # Flattened glyph pixels per character; reshaped to square
                # single-channel images for the shared conv stack.
                input_p = tf.placeholder(tf.float32,
                                         [None, bucket, pixel_dim * pixel_dim])
                self.input_p.append(input_p)
                pix_out = tf.reshape(input_p, [-1, pixel_dim, pixel_dim, 1])

                conv_out_1 = wrapper_conv_1(pix_out)
                pooling_out_1 = wrapper_mp_1(conv_out_1)

                conv_out_2 = wrapper_conv_2(pooling_out_1)
                pooling_out_2 = wrapper_mp_2(conv_out_2)

                # Sanity check: pooled spatial size matches the size the
                # dense layer was built for.
                assert p_size_2 == pooling_out_2[0].get_shape().as_list()[1]

                pooling_out = tf.reshape(
                    pooling_out_2, [-1, bucket, p_size_2 * p_size_2 * filters])

                graphic_out = wrapper_dense(pooling_out)
                graphic_out = wrapper_dr(graphic_out)

                emb_set.append(graphic_out)

            # Concatenate all enabled feature sources along the feature
            # axis; with a single source, use it directly.
            if len(emb_set) > 1:
                emb_out = tf.concat(axis=2, values=emb_set)

            else:
                emb_out = emb_set[0]

            if self.mode == 'RNN':
                rnn_out = BiLSTM(rnn_dim,
                                 fw_cell=fw_rnn_cell,
                                 bw_cell=bw_rnn_cell,
                                 p=dr,
                                 name='BiLSTM' + str(bucket),
                                 scope='BiRNN')(emb_out, input_v)

                output = output_wrapper(rnn_out)

            else:
                # FNN path: pad the sequence so every position has a full
                # window, run the window convolution, then project to tags.
                emb_out = tf.pad(emb_out,
                                 [[0, 0], [window_size, window_size], [0, 0]])
                emb_out = tf.reshape(
                    emb_out, [-1, bucket + 2 * window_size, concat_emb_dim, 1])
                conv_out = tf.nn.conv2d(emb_out,
                                        fnn_weights, [1, 1, 1, 1],
                                        padding='VALID') + fnn_bias
                fnn_out = tf.nn.tanh(conv_out)
                fnn_out = tf.reshape(fnn_out, [-1, bucket, fnn_dim])

                output = output_wrapper(fnn_out)

            # Outputs are stored as single-element lists per bucket.
            self.output.append([output])

            # Gold-tag placeholder for this bucket.
            self.output_.append([
                tf.placeholder(tf.int32, [None, bucket],
                               name='tags' + str(bucket))
            ])

            # Map bucket length -> bucket index for later lookups.
            self.bucket_dit[bucket] = idx

            print 'Bucket %d, %f seconds' % (idx + 1, time() - t1)

        # All per-bucket lists must stay aligned.
        assert len(self.input_v) == len(self.output) and len(self.output) == len(self.output_) \
               and len(self.output) == len(self.counts)

        self.params = tf.trainable_variables()

        self.saver = tf.train.Saver()
示例#24
0
    X_te /= 255.
    mean = np.mean(X_tr, axis=0)
    X_tr -= mean
    X_te -= mean
    y_tr = y_tr.reshape((-1, 1))
    y_te = y_te.reshape((-1, 1))

    model = Model(verbose=True)
    batch_size = 1024
    n_classes = 10
    std = 0.01
    reg = 0.0

    model.add_layer(
        Convolution(32, (3, 3),
                    input_shape=(batch_size, X_tr.shape[1], X_tr.shape[2],
                                 X_tr.shape[3]),
                    weight_initializer=NormalInitializer(std)))
    model.add_layer(ReLuActivation())
    model.add_layer(BatchNormalization())
    model.add_layer(
        Convolution(32, (3, 3),
                    weight_initializer=NormalInitializer(std),
                    padding='same'))

    model.add_layer(ReLuActivation())
    model.add_layer(MaxPool((2, 2)))
    model.add_layer(Flatten())

    model.add_layer(
        Affine(100, weight_initializer=NormalInitializer(std), reg=reg))
    model.add_layer(ReLuActivation())
示例#25
0
# Gradient check for the Convolution layer: feed random inputs and random
# upstream gradients, and let check_grads_layer compare the analytic
# backward pass against numerical gradients.
batch = 10
conv_params = {
    'kernel_h': 3, 'kernel_w': 3,
    'pad': 0, 'stride': 2,
    'in_channel': 3, 'out_channel': 10,
}
in_height, in_width = 10, 20


def _conv_out_dim(size, kernel):
    # Standard conv arithmetic: floor((size + 2*pad - kernel) / stride) + 1.
    return (size + 2 * conv_params['pad'] - kernel) // conv_params['stride'] + 1


out_height = _conv_out_dim(in_height, conv_params['kernel_h'])
out_width = _conv_out_dim(in_width, conv_params['kernel_w'])
inputs = np.random.uniform(
    size=(batch, conv_params['in_channel'], in_height, in_width))
in_grads = np.random.uniform(
    size=(batch, conv_params['out_channel'], out_height, out_width))
conv = Convolution(conv_params)
check_grads_layer(conv, inputs, in_grads)
#
# ## Dropout
# ratio = 0.1
# height = 10
# width = 20
# channel = 10
# np.random.seed(1234)
# inputs = np.random.uniform(size=(batch, channel, height, width))
# in_grads = np.random.uniform(size=(batch, channel, height, width))
# dropout = Dropout(ratio, seed=1234)
# dropout.set_mode(True)
# check_grads_layer(dropout, inputs, in_grads)
#
# ## Pooling
示例#26
0
    def train_cnn(self, image_name_list, epochs=1):
        """Train a 9-layer all-convolutional network with plain SGD.

        Replaces the previous nine-fold copy-pasted layer construction and
        manual `dw1..dw9` gradient unpacking with loops over the same layer
        objects; the arithmetic performed per step is unchanged.

        Args:
            image_name_list: names of the training images; forwarded to
                self.get_train_data to build (x, y) pairs.
            epochs: number of passes over the training data.

        Returns:
            Tuple (cnn_obj.params, last_layer.A_curr): the trained
            parameters and the final layer's most recent forward output.
        """
        self.train_data = []
        self.train_data.extend(self.get_train_data(image_name_list))
        # NOTE(review): the shuffled copy is only used to read the input
        # channel count; the training loop below iterates the unshuffled
        # self.train_data — confirm whether shuffling was meant to apply.
        shuffled_train_data = shuffle(self.train_data)
        _, _, ip_channels = shuffled_train_data[0][0].shape
        out_channels = 64
        filter_size = 3
        padding = 1
        lr = 0.0001

        # Conv stack: ip -> 64 -> 64 ... -> 64 -> ip (9 layers). Every
        # layer but the last is followed by a ReLU.
        n_layers = 9
        conv_layers = []
        for i in range(n_layers):
            in_c = ip_channels if i == 0 else out_channels
            out_c = ip_channels if i == n_layers - 1 else out_channels
            conv_layers.append(Convolution(in_c, out_c, filter_size, padding))
        activations = [ReLU() for _ in range(n_layers - 1)] + [None]
        layers = list(zip(conv_layers, activations))

        cnn_obj = CNN(layers)
        for epoch in range(epochs):
            for x, y in self.train_data:
                # A fresh CNN wrapper per step (as before); it shares the
                # same layer objects, so weight updates persist across steps.
                cnn_obj = CNN(layers)
                loss, grads = cnn_obj.train_step(x, y)
                print("loss:", loss)
                # Vanilla SGD: one (dw, db) pair per conv layer, in order.
                for layer, (dw, db) in zip(conv_layers, grads):
                    layer.weights = layer.weights - (lr * dw)
                    layer.bias = layer.bias - (lr * db)

        return cnn_obj.params, conv_layers[-1].A_curr