Code example #1
    def createMapImage(self):
        """Create a QImage containing all the different layers of the map."""
        # create the background, landscape, and object layers
        self.background_layer = Layers.BackgroundLayer()
        self.landscape_layer = Layers.LandscapeLayer()
        self.object_layer = Layers.ObjectLayer()

        # create first tank
        tank_one = Objects.Tank(0, 200, (self.height // 2),
                                100, 100, core.Qt.yellow, core.Qt.cyan)
        self.tanks.append(tank_one)
        self.getNewCoordinates(self.tanks[0].tank_id, 0)

        # create second tank
        tank_two = Objects.Tank(
            1, (self.width - 200), (self.height // 2),
            100, 100, core.Qt.black, core.Qt.red
        )
        self.tanks.append(tank_two)
        self.getNewCoordinates(self.tanks[1].tank_id, 0)
        # paint tanks
        tmp_painter = gui.QPainter(self.object_layer)
        tmp_painter.drawImage(self.tanks[0].x_position,
                              self.tanks[0].y_position, self.tanks[0])
        tmp_painter.drawImage(self.tanks[1].x_position,
                              self.tanks[1].y_position, self.tanks[1])
        tmp_painter.end()

        # draw all layers
        self.drawMap()
        self.drawObjects()
        self.drawGame()
        self.setPixmap(gui.QPixmap.fromImage(self.game_image))
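A note on the painting code: Qt allows only one active QPainter per paint device at a time, so the explicit tmp_painter.end() releases the object layer before the subsequent drawMap(), drawObjects(), and drawGame() calls composite the layers into the final game_image.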
Code example #2
File: ACNN.py Project: OuYag/kaggle_learning
 def __init__(self, input, batch_size, rng, n_kernels):
     self.input = input
     self.conv_pool_1 = Layers.conv_3x3(
         input=self.input,
         rng=rng,
         input_shape=(batch_size, 1, 28, 28),
         filter_shape_1=(n_kernels[0], 1, 3, 3),
         filter_shape_2=(n_kernels[0], n_kernels[0], 3, 3),
         pool_size=(2, 2)
     )
     self.conv_pool_2 = Layers.conv_3x3(
         input=self.conv_pool_1.outputs,
         rng=rng,
         input_shape=(batch_size, n_kernels[0], 11, 11),
         filter_shape_1=(n_kernels[1], n_kernels[0], 3, 3),
         filter_shape_2=(n_kernels[1], n_kernels[1], 3, 3),
         pool_size=(2, 2)
     )
     hidden_layer_input = self.conv_pool_2.outputs.flatten(2)
     self.hidden_layer_1 = Layers.Hidden_layer(
         input=hidden_layer_input,
         rng=rng,
         n_in=n_kernels[1] * 3 * 3,
         n_out=192
     )
     self.softmax_layer = Layers.Logistic_layer(
         input=self.hidden_layer_1.outputs,
         rng=rng,
         n_in=192,
         n_out=10
     )
     self.results = self.softmax_layer.pred_y
     self.params = self.conv_pool_1.params + self.conv_pool_2.params + self.hidden_layer_1.params + self.softmax_layer.params
Code example #3
File: LeNet.py Project: OuYag/kaggle_learning
 def __init__(self, input, batch_size, rng, n_kernels):
     self.input = input
     self.conv_pool_1 = Layers.conv_pool_layer(
         input=self.input,
         rng=rng,
         input_shape=(batch_size, 1, 28, 28),
         filter_shape=(n_kernels[0], 1, 5, 5),
         pool_size=(2, 2)
     )
     self.conv_pool_2 = Layers.conv_pool_layer(
         input=self.conv_pool_1.outputs,
         rng=rng,
         input_shape=(batch_size, n_kernels[0], 12, 12),
         filter_shape=(n_kernels[1], n_kernels[0], 3, 3),
         pool_size=(2, 2)
     )
     hidden_layer_input = self.conv_pool_2.outputs.flatten(2)
     self.hidden_layer_1 = Layers.Hidden_layer(
         input=hidden_layer_input,
         rng=rng,
         n_in=n_kernels[1]*5*5,
         n_out=500
     )
     self.softmax_layer = Layers.Logistic_layer(
         input=self.hidden_layer_1.outputs,
         rng=rng,
         n_in=500,
         n_out=10
     )
     self.results = self.softmax_layer.pred_y
     self.params = self.conv_pool_1.params + self.conv_pool_2.params + self.hidden_layer_1.params + self.softmax_layer.params
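The layer sizes in this LeNet variant follow from valid-convolution arithmetic (assuming 2x2 max pooling with floor division): a 28x28 input becomes (28-5+1)=24 after the 5x5 convolution and 12 after pooling, then (12-3+1)=10 after the 3x3 convolution and 5 after the second pooling. Hence the hidden layer's fan-in is n_kernels[1] * 5 * 5.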
Code example #4
def BL(template, dropout=0.1, regularizer=None, constraint=None):
    """
    
    inputs
    ----
    template: a list of network dimensions including input and output, e.g., [[40,10], [120,5], [3,1]]
    dropout: dropout percentage
    regularizer: keras regularizer object
    constraint: keras constraint object
    
    outputs
    ------
    keras model object
    """

    inputs = keras.layers.Input(template[0])

    x = inputs
    for k in range(1, len(template) - 1):
        x = Layers.BL(template[k], regularizer, constraint)(x)
        x = keras.layers.Activation('relu')(x)
        x = keras.layers.Dropout(dropout)(x)

    x = Layers.BL(template[-1], regularizer, constraint)(x)
    outputs = keras.layers.Activation('softmax')(x)

    model = keras.Model(inputs=inputs, outputs=outputs)

    optimizer = keras.optimizers.Adam(0.01)

    model.compile(optimizer, 'categorical_crossentropy', ['acc'])

    return model
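A minimal usage sketch, with shapes taken from the illustrative template in the docstring (a 40x10 input classified into 3 classes; the values are illustrative, not part of the original snippet):

# illustrative template: 40x10 input, 120x5 hidden, 3 output classes
model = BL([[40, 10], [120, 5], [3, 1]], dropout=0.1)
model.summary()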
Code example #5
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
                  target_sequence_length,
                  max_target_word_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers, target_vocab_to_int):

    """
    Build the Sequence-to-Sequence model
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    enc_outputs, enc_states = Layers.encoding_layer(input_data,
                                                    rnn_size,
                                                    num_layers,
                                                    keep_prob,
                                                    source_vocab_size,
                                                    enc_embedding_size)

    dec_input = Model_Inputs.process_decoder_input(target_data,
                                                   target_vocab_to_int,
                                                   batch_size)

    train_output, infer_output = Layers.decoding_layer(dec_input,
                                                       enc_states,
                                                       target_sequence_length,
                                                       max_target_word_length,
                                                       rnn_size,
                                                       num_layers,
                                                       target_vocab_to_int,
                                                       target_vocab_size,
                                                       batch_size,
                                                       keep_prob,
                                                       dec_embedding_size)

    return train_output, infer_output
Code example #6
def build_SIREN_model(dimensions):
    actual_model = tf.keras.Sequential()
    actual_model.add(Layers.FirstSirenLayer(dimensions[0], dimensions[1]))
    for dim0, dim1 in zip(dimensions[1:-2], dimensions[2:-1]):
        actual_model.add(Layers.MiddleSirenLayer(dim0, dim1))
    actual_model.add(Layers.FinalSirenLayer(dimensions[-2], dimensions[-1]))
    return actual_model
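A hypothetical call, assuming the usual SIREN image-fitting setup in which 2-D pixel coordinates are mapped to RGB values; the layer widths here are illustrative only:

# illustrative dimensions: 2-D coordinates in, three 256-wide SIREN layers, RGB out
siren = build_SIREN_model([2, 256, 256, 256, 3])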
Code example #7
	def transition_layer(self, x, name):
		with tf.variable_scope(name):
			input_tensor_depth = int(x.get_shape()[-1])
			output_depth = int(self.theta * input_tensor_depth)
			x = layers.batch_normalization(x, self.is_training, name = 'batch')
			x = tf.nn.relu(x)
			x = layers.convolution2d(x, 1, output_depth, weight_decay = CONV_WEIGHT_DECAY, bias = False, name = 'conv')
			x = tf.nn.dropout(x, self.keep_prob)
			x = layers.avg_pool_2d(x, kernel_size = 2, stride = 2, name="AvgPool2D")
			return x
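For context, this matches the DenseNet-BC transition layer: self.theta is the compression factor that shrinks the channel count via the 1x1 convolution before the 2x2 average pooling halves the spatial resolution.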
Code example #8
 def finalLayer(self, y, n_iters=1, learner_size=200):
     print "Final Layer"
     sigmoid = Layers.SigmoidLayer(self.X.shape[1],
                                   learner_size,
                                   noise=Noise.GaussianNoise(0.1))
     softmax = Layers.SoftmaxLayer(learner_size, y.shape[1])
     trainer = Trainer()
     sigmoid, softmax = trainer.train([sigmoid, softmax], self.X, y,
                                      n_iters)
     self.Layers.append(sigmoid)
     self.Layers.append(softmax)
Code example #9
def ConstructNet(tokens, leafCntString, Wleft, Wright, Bidx, tokenMap, numFea,
                 tokenNum):

    leafCnt = [int(cnt) for cnt in leafCntString.split(' ')]
    totalLeaf = float(sum(leafCnt))
    leafCnt = np.array(leafCnt) / totalLeaf

    layers = []

    rootLayer = Lay.layer(tokens[0],
                          range(numFea * tokenNum, numFea * (tokenNum + 1)),
                          numFea)
    totalChildren = len(tokens)
    for idx in range(1, totalChildren):
        childName = tokens[idx]
        childIdx = tokenMap[childName]
        # construct node (layer)
        childLayer = Lay.layer(name=childName,
                               Bidx=range(numFea * childIdx, numFea * (childIdx + 1)),
                               numunit=numFea)
        # add connection
        if totalChildren == 2:
            leftCoef = .5
            rightCoef = .5
        else:
            rightCoef = (idx - 1.0) / (totalChildren - 2)
            leftCoef = 1 - rightCoef
        #print idx, len(leafCnt), len(tokens), leafCnt
        leftCoef *= leafCnt[idx - 1]
        rightCoef *= leafCnt[idx - 1]

        if leftCoef != 0:
            Con.connection(childLayer, rootLayer, numFea, numFea,
                           Wleft, leftCoef)
        if rightCoef != 0:
            Con.connection(childLayer, rootLayer, numFea, numFea,
                           Wright, rightCoef)

        layers.append(childLayer)
    # end of each layer

    layers.append(rootLayer)

    for idx in range(0, len(layers) - 1):
        layers[idx].successiveUpper = layers[idx + 1]
        layers[idx + 1].successiveLower = layers[idx]
    return layers
Code example #10
def build_orthogonal_model_with_SIREN_encoder(dimensions, use_bias=False):
    actual_model = tf.keras.Sequential()
    actual_model.add(Layers.FirstSirenLayer(dimensions[0], dimensions[1]))
    for dim0, dim1 in zip(dimensions[1:-2], dimensions[2:-1]):
        actual_model.add(Layers.Sinusoidal_BSNN(dim0, dim1, use_bias=use_bias))
    actual_model.add(
        Layers.Sinusoidal_BSNN(dimensions[-2],
                               dimensions[-1],
                               is_last=True,
                               use_bias=use_bias))
    return actual_model
Code example #11
    def return_generated_decode_block(self):
        decode_block = nn.Sequential(
            Layers.resnet_decoder_layer(self.n * 8, self.n * 4,
                                        self.slope),  # (bs, 256, 64, 64)
            Layers.resnet_decoder_layer(self.n * 4, self.n * 2,
                                        self.slope),  # (bs, 128, 128, 128)
            Layers.resnet_decoder_layer(self.n * 2, self.n * 1,
                                        self.slope),  # (bs, 64, 256, 256)
            nn.Conv2d(self.n, self.img_channels, 7, 1, 3),  # (bs, 3, 256, 256)
            nn.Tanh())

        return decode_block
Code example #12
	def bottleneck_layer(self, x, name):
		with tf.variable_scope(name):
			x = layers.batch_normalization(x, self.is_training, name = 'batch1')
			x = tf.nn.relu(x)
			x = layers.convolution2d(x, 1, self.bn_size * self.growth_rate, weight_decay = CONV_WEIGHT_DECAY, bias = False, name = 'conv1')
			x = tf.nn.dropout(x, self.keep_prob)

			x = layers.batch_normalization(x, self.is_training, name = 'batch2')
			x = tf.nn.relu(x)
			x = layers.convolution2d(x, 3, self.growth_rate, weight_decay = CONV_WEIGHT_DECAY, bias = False, name = 'conv2')
			x = tf.nn.dropout(x, self.keep_prob)
			return x
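This is the DenseNet-BC bottleneck pattern: the 1x1 convolution first expands to bn_size * growth_rate channels, and the 3x3 convolution then produces the growth_rate new feature maps that the surrounding dense block concatenates onto its input.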
Code example #13
def build_orthogonal_model_with_rotated_encoder(dimensions,
                                                use_bias=False,
                                                scale_factor=1.0):
    actual_model = tf.keras.Sequential()
    actual_model.add(
        PositionalEncoders.RotatedPositionalEncoderLayer(
            dimensions[0], dimensions[1], scale_factor=scale_factor))
    for dim0, dim1 in zip(dimensions[1:-2], dimensions[2:-1]):
        actual_model.add(Layers.Sinusoidal_BSNN(dim0, dim1))
    actual_model.add(
        Layers.Sinusoidal_BSNN(dimensions[-2], dimensions[-1], is_last=True))
    return actual_model
Code example #14
 def __init__(self, hyperParams):
     self.hP = hyperParams
     
     #Instantiate Layers:
     self.hL = Layers.HiddenLayer(inputSize=self.hP.layerSizes[0],
                                  outputSize=self.hP.layerSizes[1],
                                  activationType=self.hP.activations[0])
     self.oL = Layers.OutputLayer(inputSize=self.hP.layerSizes[1],
                                  outputSize=self.hP.layerSizes[2],
                                  activationType=self.hP.activations[1])
     
     self.inputSize = self.hL.inputSize
     
     #Initialize Params
     self.params = Params([self.hL, self.oL])
Code example #15
 def pre_train(self, X, epochs=1, noise_rate=0.3):
     self.structure = numpy.concatenate([[X.shape[1]], self.structure])
     self.X = X
     trainer = Trainer()
     print("Pre-training: ")  #, self.__repr__()
     for i in range(len(self.structure) - 1):
         #print ("Layer: %dx%d"%( self.structure[i], self.structure[i+1]))
         s1 = Layers.SigmoidLayer(self.structure[i],
                                  self.structure[i + 1],
                                  noise=Noise.SaltAndPepper(noise_rate))
         s2 = Layers.SigmoidLayer(self.structure[i + 1], self.X.shape[1])
         s1, s2 = trainer.train([s1, s2], self.X, self.X, epochs)
         self.X = s1.activate(self.X)
         self.Layers.append(s1)
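Each iteration greedily pre-trains one stage as a denoising autoencoder: s1 encodes the salt-and-pepper-corrupted input, s2 decodes back to the stage's uncorrupted input, and only the encoder s1 is kept, with its activations becoming the training input for the next stage.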
Code example #16
    def return_generated_down_stack(self):
        block = nn.ModuleList([
            Layers.down_grade_layer(self.img_channels,
                                    self.n,
                                    self.slope,
                                    norm=False),  # (bs, 128, 128, 64)
            Layers.down_grade_layer(self.n, self.n * 2,
                                    self.slope),  # (bs, 64, 64, 128)
            Layers.down_grade_layer(self.n * 2, self.n * 4,
                                    self.slope),  # (bs, 32, 32, 256)
            Layers.down_grade_layer(self.n * 4, self.n * 8,
                                    self.slope),  # (bs, 16, 16, 512)
            Layers.down_grade_layer(self.n * 8, self.n * 8,
                                    self.slope),  # (bs, 8, 8, 512)
            Layers.down_grade_layer(self.n * 8, self.n * 8,
                                    self.slope),  # (bs, 4, 4, 512)
            Layers.down_grade_layer(self.n * 8, self.n * 8,
                                    self.slope),  # (bs, 2, 2, 512)
            Layers.down_grade_layer(self.n * 8,
                                    self.n * 8,
                                    self.slope,
                                    norm=False),  # (bs, 1, 1, 512)
        ])

        return block
Code example #17
    def return_generated_encode_block(self):
        encode_block = nn.Sequential(
            nn.Conv2d(self.img_channels, self.n, 7, 1,
                      3),  # (bs, 64, 256, 256)
            nn.LeakyReLU(self.slope, True),
            Layers.resnet_encoder_layer(self.n, self.n * 2,
                                        self.slope),  # (bs, 128, 128, 128)
            Layers.resnet_encoder_layer(self.n * 2, self.n * 4,
                                        self.slope),  # (bs, 256, 64, 64)
            Layers.resnet_encoder_layer(self.n * 4, self.n * 8,
                                        self.slope),  # (bs, 512, 32, 32)
        )

        return encode_block
Code example #18
 def conv2d_transpose(
     self,
     name,
     in_tensor,
     kx,
     ky,
     kout,
     outshape,
     stride=None,
     biased=True,
     kernel_initializer=None,
     biase_initializer=None,
     padding='SAME',
 ):
     out, w, b = Layers.conv2d_transpose(
         in_tensor,
         name,
         kx,
         ky,
         kout,
         outshape,
         stride,
         biased,
         kernel_initializer,
         biase_initializer,
         padding,
         data_format=self.data_format,
     )
     self.weights.update({w.op.name: w})
     if biased:
         self.weights.update({b.op.name: b})
     return out
Code example #19
def get_sensing_model(input_shape, target_shape):
    inputs = Input(input_shape)
    outputs = Layers.TensorEncoder(target_shape, name='linear_encoder')(inputs)

    model = Model(inputs=inputs, outputs=outputs)

    return model
Code example #20
    def batch_norm(self, name, in_tensor, phase_train, reuse=None):

        return Layers.batch_norm(in_tensor,
                                 phase_train,
                                 name,
                                 reuse,
                                 data_format=self.data_format)
Code example #21
File: auto_encoder.py Project: codedecde/10-707-HW-2
 def __init__(self, in_dim, hidden_dim, weight_sharing=False):
     self.in_dim = in_dim
     self.n_dim = hidden_dim
     self.out_dim = in_dim
     self.weight_sharing = weight_sharing
     self.encoder = L.DenseLayer(self.in_dim, self.n_dim)
     self.decoder = L.DenseLayer(self.n_dim, self.out_dim)
     self._layers = ["encoder", "decoder"]
     if self.weight_sharing:
         for l in range(len(self._layers) // 2):
             encoder = getattr(self, self._layers[l])
             decoder = getattr(self,
                               self._layers[len(self._layers) - 1 - l])
             assert decoder.params['W'].data.shape == encoder.params[
                 'W'].data.transpose().shape
             decoder.params['W'].data = encoder.params['W'].data.transpose()
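With weight_sharing enabled, the decoder's weight matrix is initialized to the transpose of the encoder's, the classic tied-weights autoencoder setup; whether the tie persists through training depends on how the surrounding code updates DenseLayer parameters.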
Code example #22
    def __init__(self, ngpu, nz, ngf, nc, k):
        super(Generator, self).__init__()
        self.ngpu = ngpu

        layers = []

        d_in = 2**k
        layers.append(
            nn.ConvTranspose2d(nz, ngf * d_in, kernel_size, 1, 0, bias=False))
        layers.append(nn.BatchNorm2d(ngf * d_in))
        layers.append(nn.ReLU(True))
        # state size. (ngf*16) x 4 x 4

        #------------------------------------------
        for i in range(k):
            n = k - i
            layers.append(ll.GenLayerSN(ngf, n))
        #------------------------------------------

        layers.append(sa.Self_Attn(ngf, "relu"))

        layers.append(
            nn.ConvTranspose2d(ngf,
                               nc,
                               kernel_size,
                               stride,
                               padding,
                               bias=False))
        layers.append(nn.Tanh())
        # state size. (nc) x 128 x 128

        self.main = nn.ModuleList(layers)
Code example #23
File: Model.py Project: ddddwee1/TorchSUL
 def initialize(self,
                outsize,
                batch_norm=False,
                affine=True,
                activation=-1,
                usebias=True,
                norm=False):
     self.fc = L.fclayer(outsize, usebias, norm)
     self.batch_norm = batch_norm
     self.activation = activation
     if self.activation == PARAM_PRELU:
         self.act = torch.nn.PReLU(num_parameters=outsize)
     elif self.activation == PARAM_PRELU1:
         self.act = torch.nn.PReLU(num_parameters=1)
     if batch_norm:
         self.bn = L.BatchNorm(affine=affine)
Code example #24
    def __init__(self):
        model_output = input_image = kr.Input(shape=HP.image_size +
                                              [HP.channel_size])
        model_output = kr.layers.Conv2D(filters=64,
                                        kernel_size=5,
                                        padding='same',
                                        activation=tf.nn.leaky_relu,
                                        use_bias=False)(model_output)
        model_output = HP.DiscriminatorNormLayer()(model_output)

        for _ in range(4):
            model_output = Layers.DownScale(conv_depth=0)(model_output)

        model_output = kr.layers.Flatten()(model_output)
        output_adversarial_value = kr.layers.Dense(
            units=1, activation='linear', dtype='float32')(model_output)

        if HP.is_acgan:
            output_classification_value = kr.layers.Dense(
                units=HP.attributes_size, activation='linear',
                dtype='float32')(model_output)
        else:
            output_classification_value = kr.layers.Dense(
                units=HP.attributes_size * 2,
                activation='linear',
                dtype='float32')(model_output)

        self.model = kr.Model(
            input_image,
            [output_adversarial_value, output_classification_value])
Code example #25
 def finalLayer(self, X, y, epochs=1, n_neurons=200):
     print "Final Layer..."
     V = self.predict(X)
     # print(y)
     softmax = Layers.SoftmaxLayer(self.Layers[-1].W.shape[1], y.shape[1])
     softmax = Trainer().train([softmax], V, y, epochs)[0]
     self.Layers.append(softmax)
Code example #26
    def __init__(self, ngpu, ndf, nc, k):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu

        layers = []

        layers.append(nn.Conv2d(nc, ndf, kernel_size, stride=stride, padding=padding, bias=False) )
        layers.append(nn.LeakyReLU(0.2, inplace=True))
        # state size. (ndf) x 64 x 64

        #--------------------------------------------
        for i in range(k):
            layers.append(ll.DisLayerSN_d(ndf, i))
        #--------------------------------------------

        d_out = 2**k

        layers.append(sa.Self_Attn(ndf * d_out, "relu"))

        layers.append(nn.Conv2d(ndf * d_out, 1, kernel_size, stride=1, padding=0, bias=False))
        layers.append(nn.Sigmoid())
        # state size. 1
        
        self.main = nn.ModuleList(layers)
Code example #27
File: Model.py Project: ddddwee1/TorchSUL
 def initialize(self, chn):
     self.gx = L.conv2D(3, chn)
     self.gh = L.conv2D(3, chn)
     self.fx = L.conv2D(3, chn)
     self.fh = L.conv2D(3, chn)
     self.ox = L.conv2D(3, chn)
     self.oh = L.conv2D(3, chn)
Code example #28
def training():

    train_images, train_labels = input_data.get_files(TRAIN_DIR)
    batch_images, batch_labels = input_data.get_batches(
        train_images, train_labels, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = CNN.CNN(batch_images, BATCH_SIZE, N_CLASSES)
    train_loss = Layers.loss(train_logits, batch_labels)
    train_op = Layers.optimize(train_loss, learning_rate)
    train_accuracy = Layers.accuracy(train_logits, batch_labels)

    summary_op = tf.summary.merge_all()

    sess = tf.Session()
    train_writer = tf.summary.FileWriter(LOGS_TRAIN_DIR, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in range(MAX_STEP):
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run(
                [train_op, train_loss, train_accuracy])

            if step % 50 == 0:
                print(
                    'Step %d, the training loss is %.2f, train accuracy is %.2f%%'
                    % (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 2000 == 0:
                checkpoint_path = os.path.join(LOGS_TRAIN_DIR, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Training Done.')

    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
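Note that this training loop targets the TensorFlow 1.x API (tf.Session, tf.train.Coordinator, queue runners); under TensorFlow 2 it would need tf.compat.v1 or a rewrite around tf.data.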
Code example #29
def Res2NetLite(input_, base_channel=32, is_training=True,
                data_format='channels_last', reuse=False, c=72,
                stage_number=[3, 7, 3], name='Res2NetLite'):
    """
    The backbone of the detection model.
    """
    with tf.variable_scope(name_or_scope = name, reuse = reuse):
        layers = []
        #stage-0: 224,224
        bn1 = ly._bn(input_, data_format, is_training)
        #112,112,32
        conv1 = ly.conv_bn_activation(bn1, 32, 3, 2)
        #56,56,32
        maxp1 = ly.max_pooling(conv1, 3, 2)
        
        #stage-1
        #28,28,4c
        bottle1_s1 = md.Bottleneck(32, 4*c, name ='Bottleneck_stage1')
        out_s1 = bottle1_s1.forward(maxp1)
        for i in range(stage_number[0]):
            _name = 'res2block_'+str(i)+'_stage1'
            layers.append(md.Res2Block(4*c, name = _name))
            out_s1 = layers[-1].forward(out_s1)
        
        #stage-2
        #14,14,8c
        bottle1_s2 = md.Bottleneck(4*c, 8*c, name ='Bottleneck_stage2')
        out_s2 = bottle1_s2.forward(out_s1)
        for i in range(stage_number[1]):
            _name = 'res2block_'+str(i)+'_stage2'
            layers.append(md.Res2Block(8*c, name = _name))
            out_s2 = layers[-1].forward(out_s2)
        
        #stage-3
        #7,7,16c
        bottle1_s3 = md.Bottleneck(8*c, 16*c, name ='Bottleneck_stage3')
        out_s3 = bottle1_s3.forward(out_s2)
        for i in range(stage_number[2]):
            _name = 'res2block_'+str(i)+'_stage3'
            layers.append(md.Res2Block(16*c, name = _name))
            out_s3 = layers[-1].forward(out_s3)
        
        #stage-4
        out = ly.avg_pooling(out_s3, 7, 7)
        print(out)
        out = ly.conv_bn_activation(out, 1000, 1, 1)
        print(conv1,maxp1,out_s1,out_s2,out_s3)
        return out
Code example #30
def TABL(template,
         dropout=0.1,
         projection_regularizer=None,
         projection_constraint=None,
         attention_regularizer=None,
         attention_constraint=None):
    """
    Temporal Attention augmented Bilinear Layer network, refer to the paper https://arxiv.org/abs/1712.00975
    
    inputs
    ----
    template: a list of network dimensions including input and output, e.g., [[40,10], [120,5], [3,1]]
    dropout: dropout percentage
    projection_regularizer: keras regularizer object for projection matrices
    projection_constraint: keras constraint object for projection matrices
    attention_regularizer: keras regularizer object for attention matrices
    attention_constraint: keras constraint object for attention matrices
    
    outputs
    ------
    keras model object
    """

    inputs = keras.layers.Input(template[0])

    x = inputs
    for k in range(1, len(template) - 1):
        x = Layers.BL(template[k], projection_regularizer,
                      projection_constraint)(x)
        x = keras.layers.Activation('relu')(x)
        x = keras.layers.Dropout(dropout)(x)

    x = Layers.TABL(template[-1], projection_regularizer,
                    projection_constraint, attention_regularizer,
                    attention_constraint)(x)
    outputs = keras.layers.Activation('softmax')(x)

    model = keras.Model(inputs=inputs, outputs=outputs)

    optimizer = keras.optimizers.Adam(0.01)

    model.compile(optimizer, 'categorical_crossentropy', ['acc'])

    return model
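A minimal construction sketch; the template follows the docstring's example, and the max_norm constraint values are assumptions for illustration, not prescribed by the snippet:

from keras.constraints import max_norm

# illustrative template and constraints: 40x10 input, 3 output classes
model = TABL([[40, 10], [120, 5], [3, 1]],
             projection_constraint=max_norm(3.0, axis=0),
             attention_constraint=max_norm(5.0, axis=1))
model.summary()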
Code example #31
 def output_layer_delta(self, targets):
     output_layer_scores = self.layers[-1].get_transfer_output()
     output_layer_probabilities = Layers.get_probabilities_from_scores(
         output_layer_scores
     )
     output_layer_probabilities[range(np.shape(targets)[0]), targets] -= 1
     other_layer_delta = output_layer_probabilities / np.shape(targets)[0]
     return other_layer_delta
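This computes the averaged gradient of softmax cross-entropy with integer class targets: with p = softmax(scores), the delta is (p - onehot(targets)) / N, which is exactly what subtracting 1 at the target indices and dividing by the batch size produces.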
Code example #32
File: test.py Project: OuYag/kaggle_learning
import cv2
import matplotlib.pyplot as plt
import numpy as np

import Layers

rng = np.random.RandomState(1234)
image = cv2.imread('./dataset/images/1.jpg', 0)
input = image.reshape(1, 1, image.shape[0], image.shape[1])
input = np.array(input, dtype=np.float64)
conv_3x3 = Layers.conv_3x3(input, rng, input.shape, (4, 1, 3, 3), (6, 4, 3, 3))

out = conv_3x3.outputs
g_p = Layers.global_avearge_pool(out, pool_size=(2, 2))
value = g_p.outputs.eval()
print(value.shape)
for i in range(value.shape[1]):
    plt.subplot(2, 3, i + 1)
    plt.imshow(value[0, i, :, :], 'gray')
plt.show()
Code example #33
File: construct_senlstm.py Project: Lili-Mou/FFNN
def lstm(sen):
    ###########################################
    # construct a layer for each word
    # Layers
    # |-----vector-----------| (nWords)
    # constructing a layer
    # def __init__(self, name, Bidx, numunit):
    layers = []

    for idx, w in enumerate(sen):
        if w in vocab:
            word_id = vocab[w]  # w is the word
        else:
            word_id = 0  # unknown words map to index 0
        embedLayer = Lay.layer(w, word_id * numEmbed, numEmbed, '0')

        i = Lay.layer('i_' + w, B_i[0], numLSTM, 'l')
        f = Lay.layer('f_' + w, B_f[0], numLSTM, 'l')
        o = Lay.layer('o_' + w, B_o[0], numLSTM, 'l')
        g = Lay.layer('g_' + w, B_g[0], numLSTM, 'l')

        # c_tilde is the cell state c after applying the activation function
        c = Lay.layer('c_' + w, -1, numLSTM, '0')
        c_tilde = Lay.layer('c_tilde_' + w, -1, numLSTM, 't')

        h = Lay.layer('h_' + w, -1, numLSTM, '0')

        layers.append(embedLayer)
        layers.append(i)
        layers.append(f)
        layers.append(o)
        layers.append(g)
        layers.append(c)
        layers.append(c_tilde)
        layers.append(h)
        
        ########################
        # connections within this time slot
        # connection:
        #   def __init__(self, xlayer, ylayer, Widx, Wcoef = 1.0)
        # BilinearConnection:
        #   def __init__(self, xlayer1, xlayer2, ylayer, Widx)
        Con.connection(embedLayer, i, W_i[0])
        Con.connection(embedLayer, f, W_f[0])
        Con.connection(embedLayer, o, W_o[0])
        Con.connection(embedLayer, g, W_g[0])
        
        Con.BilinearConnection( i, g, c, -1 )
        Con.connection( c, c_tilde, -1)
        Con.BilinearConnection( o, c_tilde, h, -1)
        
        ########################
        # recurrent connections
        # layers[-9]: hidden layer of last time slot (h)
        # layers[-11]: cell of last time slot (c)
        if idx != 0:
            Con.connection(layers[-9], i, U_i[0])
            Con.connection(layers[-9], f, U_f[0])
            Con.connection(layers[-9], o, U_o[0])
            Con.connection(layers[-9], g, U_g[0])
            
            #print 'layer[-9].name: ', layers[-9].name
            #print 'layer[-11].name: ', layers[-11].name
            
            # self loop in c:
            Con.BilinearConnection( layers[-11], f, c, -1)
    
    ###########################
    # output layer
    # softmax
    
    outlayer = Lay.layer('output', Bout[0], numOut, 's')
    Con.connection(layers[-1], outlayer, Wout[0])
    
    layers.append(outlayer)
    
    return layers
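Reading off the connections, each time step builds the standard LSTM recurrence: gates i, f, o and candidate g are fed by W_* from the word embedding and U_* from the previous hidden state, the cell state follows c_t = i_t * g_t + f_t * c_{t-1}, the hidden state follows h_t = o_t * tanh(c_t) (via the c_tilde layer), and the last word's h feeds the softmax output layer.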