Example #1
    def create_net(self, dropout=0.5):
        self.embed = layer.Dense('embed',
                                 self.embed_size,
                                 input_sample_shape=(self.vocab_size,))
        self.embed.to_device(self.dev)

        self.lstm = layer.LSTM(
                    name='lstm',
                    hidden_size=self.hidden_size,
                    num_stacks=self.num_stack_layers,
                    dropout=dropout,
                    input_sample_shape=(
                        self.embed_size,
                    ))
        self.lstm.to_device(self.dev)

        self.dense = layer.Dense(
                    'dense',
                    2,  # output shape
                    input_sample_shape=(
                        self.hidden_size,
                    ))
        self.dense.to_device(self.dev)
        self.sft = layer.Softmax('softmax',
                                 input_sample_shape=(2,))
        self.sft.to_device(self.dev)

        self.loss = loss.SoftmaxCrossEntropy()
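
A minimal usage sketch for the layers built above (not part of the original example): it chains embed, lstm, dense and softmax for one evaluation pass, following the layer.forward(model_pb2.kEval, ...) convention and the trailing (hx, cx) placeholder tensors used by the LSTM examples below. The method name and the shape of xs are assumptions.

    def predict(self, xs):
        # xs: a list of one-hot (batch, vocab_size) tensors on self.dev, one per timestep
        embeds = [self.embed.forward(model_pb2.kEval, x) for x in xs]
        # the LSTM takes the per-step inputs plus (hx, cx) placeholders,
        # and returns the per-step outputs plus (hy, cy)
        embeds.append(tensor.Tensor())
        embeds.append(tensor.Tensor())
        outs = self.lstm.forward(model_pb2.kEval, embeds)[0:-2]
        act = self.dense.forward(model_pb2.kEval, outs[-1])  # last step only
        return self.sft.forward(model_pb2.kEval, act)
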
Example #2
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    ConvBnReLU(net, 'conv1_1', 64, (3, 32, 32))
    net.add(layer.Dropout('drop1', 0.3))
    ConvBnReLU(net, 'conv1_2', 64)
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv2_1', 128)
    net.add(layer.Dropout('drop2_1', 0.4))
    ConvBnReLU(net, 'conv2_2', 128)
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv3_1', 256)
    net.add(layer.Dropout('drop3_1', 0.4))
    ConvBnReLU(net, 'conv3_2', 256)
    net.add(layer.Dropout('drop3_2', 0.4))
    ConvBnReLU(net, 'conv3_3', 256)
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv4_1', 512)
    net.add(layer.Dropout('drop4_1', 0.4))
    ConvBnReLU(net, 'conv4_2', 512)
    net.add(layer.Dropout('drop4_2', 0.4))
    ConvBnReLU(net, 'conv4_3', 512)
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv5_1', 512)
    net.add(layer.Dropout('drop5_1', 0.4))
    ConvBnReLU(net, 'conv5_2', 512)
    net.add(layer.Dropout('drop5_2', 0.4))
    ConvBnReLU(net, 'conv5_3', 512)
    net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dropout('drop_flat', 0.5))
    net.add(layer.Dense('ip1', 512))
    net.add(layer.BatchNormalization('batchnorm_ip1'))
    net.add(layer.Activation('relu_ip1'))
    net.add(layer.Dropout('drop_ip2', 0.5))
    net.add(layer.Dense('ip2', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, 3 * 3 * p.shape[0])
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
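
A hedged smoke test for the net above, stitched together from patterns in the other examples (the forward/loss/backward flow is copied from the test in Example 9; the batch size, the dummy labels, and the assumption that ffnet.FeedForwardNet exposes the same forward/loss/backward API as net.FeedForwardNet are mine, not the original author's):

net = create_net(use_cpu=True)

x = tensor.Tensor((4, 3, 32, 32))
x.gaussian(0, 0.01)                                     # random CIFAR-10-sized batch
y = tensor.from_numpy(np.zeros((4,), dtype=np.int32))   # dummy class labels

o = net.forward(True, x)            # training-mode forward pass
net.loss.forward(True, o, y)        # softmax cross-entropy on the logits
net.backward(net.loss.backward())   # gradients w.r.t. every parameter
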
Example #3
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    net.add(
        layer.Conv2D('conv1',
                     nb_kernels=32,
                     kernel=7,
                     stride=3,
                     pad=1,
                     input_sample_shape=input_shape))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv2', nb_kernels=64, kernel=5, stride=3))
    net.add(layer.Activation('relu2'))
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv3', nb_kernels=128, kernel=3, stride=1, pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv4', nb_kernels=256, kernel=3, stride=1))
    net.add(layer.Activation('relu4'))
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 256))
    net.add(layer.Activation('relu5'))
    net.add(layer.Dense('ip6', 16))
    net.add(layer.Activation('relu6'))
    net.add(layer.Dense('ip7', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #4
def create_net(depth, nb_classes, dense=0, use_cpu=True):
    if use_cpu:
        layer.engine = 'singacpp'

    net = densenet_base(depth)

    # this part was not included in the PyTorch model
    if dense > 0:
        net.add(layer.Dense('hidden-dense', dense))
        net.add(layer.Activation('act-dense'))
        net.add(layer.Dropout('dropout'))

    net.add(layer.Dense('sigmoid', nb_classes))
    return net
Example #5
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    ConvBnReLUPool(net, 'conv1', 32, input_shape)
    ConvBnReLUPool(net, 'conv2', 64)
    ConvBnReLUPool(net, 'conv3', 128)
    ConvBnReLUPool(net, 'conv4', 128)
    ConvBnReLUPool(net, 'conv5', 256)
    ConvBnReLUPool(net, 'conv6', 256)
    ConvBnReLUPool(net, 'conv7', 512)
    ConvBnReLUPool(net, 'conv8', 512)

    net.add(layer.Flatten('flat'))

    net.add(layer.Dense('ip1', 256))
    net.add(layer.BatchNormalization('bn1'))
    net.add(layer.Activation('relu1'))
    net.add(layer.Dropout('dropout1', 0.2))

    net.add(layer.Dense('ip2', 16))
    net.add(layer.BatchNormalization('bn2'))
    net.add(layer.Activation('relu2'))
    net.add(layer.Dropout('dropout2', 0.2))

    net.add(layer.Dense('ip3', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #6
    def test_save_load(self):
        ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
        ffn.add(layer.Conv2D('conv', 4, 3, input_sample_shape=(3, 12, 12)))
        ffn.add(layer.Flatten('flat'))
        # ffn.add(layer.BatchNorm('bn'))
        ffn.add(layer.Dense('dense', num_output=4))
        for pname, pval in zip(ffn.param_names(), ffn.param_values()):
            pval.set_value(0.1)
        ffn.save('test_snapshot')
        ffn.save('test_pickle', use_pickle=True)

        ffn.load('test_snapshot')
        ffn.load('test_pickle', use_pickle=True)
Example #7
 def build_net(self):
     if self.use_cpu:
         layer.engine = 'singacpp'
     else:
         layer.engine = 'cudnn'
     self.net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(),
                                     metric.Accuracy())
     self.net.add(
         Reshape('reshape1', (self.vocab_size, ),
                 input_sample_shape=(self.maxlen, self.vocab_size)))
     self.net.add(layer.Dense('embed',
                              self.embed_size))  # output: (embed_size, )
     self.net.add(layer.Dropout('dropout'))
     self.net.add(Reshape('reshape2', (1, self.maxlen, self.embed_size)))
     self.net.add(
         layer.Conv2D('conv',
                      self.filters, (self.kernel_size, self.embed_size),
                      border_mode='valid'))  # output: (filter, embed_size)
     if not self.use_cpu:
         self.net.add(layer.BatchNormalization('batchNorm'))
     self.net.add(layer.Activation('activ'))  # output: (filter, embed_size)
     self.net.add(layer.MaxPooling2D('max', stride=self.pool_size))
     self.net.add(layer.Flatten('flatten'))
     self.net.add(layer.Dense('dense', 2))
Example #8
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(
        layer.Conv2D("conv1", 16, 3, 1, pad=1, input_sample_shape=(3, 32, 32)))
    net.add(layer.BatchNormalization("bn1"))
    net.add(layer.Activation("relu1"))

    Block(net, "2a", 16, 1)
    Block(net, "2b", 16, 1)
    Block(net, "2c", 16, 1)

    Block(net, "3a", 32, 2)
    Block(net, "3b", 32, 1)
    Block(net, "3c", 32, 1)

    Block(net, "4a", 64, 2)
    Block(net, "4b", 64, 1)
    Block(net, "4c", 64, 1)

    net.add(layer.AvgPooling2D("pool4", 8, 8, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        # print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                # initializer.gaussian(p, 0, math.sqrt(2.0/p.shape[1]))
                initializer.gaussian(p, 0, 9.0 * p.shape[0])
            else:
                initializer.uniform(p, p.shape[0], p.shape[1])
        else:
            p.set_value(0)
        # print(name, p.l1())

    return net
Example #9
 def test_train_one_batch(self):
     ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
     ffn.add(layer.Conv2D('conv', 4, 3, input_sample_shape=(3, 12, 12)))
     ffn.add(layer.Flatten('flat'))
     ffn.add(layer.Dense('dense', num_output=4))
     for pname, pval in zip(ffn.param_names(), ffn.param_values()):
         pval.set_value(0.1)
     x = tensor.Tensor((4, 3, 12, 12))
     x.gaussian(0, 0.01)
     y = np.asarray([[1, 0, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]],
                    dtype=np.int32)
     y = tensor.from_numpy(y)
     o = ffn.forward(True, x)
     ffn.loss.forward(True, o, y)
     g = ffn.loss.backward()
     for pname, pvalue, pgrad in ffn.backward(g):
         self.assertEqual(len(pvalue), len(pgrad))
         for p, g in zip(pvalue, pgrad):
             self.assertEqual(p.size(), g.size())
Example #10
def create_net(in_shape, hyperpara, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    height, width, kernel_y, kernel_x, stride_y, stride_x = hyperpara[:6]
    print("kernel_x: ", kernel_x)
    print("stride_x: ", stride_x)
    net = myffnet.ProbFeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(layer.Conv2D('conv1', 100, kernel=(kernel_y, kernel_x), stride=(stride_y, stride_x), pad=(0, 0),
                         input_sample_shape=(int(in_shape[0]), int(in_shape[1]), int(in_shape[2]))))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 1, pad=0))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('dense', 2))

    for (pname, pvalue) in zip(net.param_names(), net.param_values()):
        if len(pvalue.shape) > 1:
            initializer.gaussian(pvalue, pvalue.shape[0], pvalue.shape[1])
        else:
            pvalue.set_value(0)
        print(pname, pvalue.l1())
    return net
Example #11
        decoderw = param['decoder_w']
        densew, denseb = param['dense_w'], param['dense_b']
        hiddensize = param['hidden_size']
        numstacks = param['num_stacks']
        drop_out = param['dropout']
        vocab_size = 7000
        cuda = device.create_cuda_gpu_on(1)
        encoder = layer.LSTM(name='lstm1', hidden_size=hiddensize, num_stacks=numstacks, dropout=drop_out, input_sample_shape=(vocab_size,))
        decoder = layer.LSTM(name='lstm2', hidden_size=hiddensize, num_stacks=numstacks, dropout=drop_out, input_sample_shape=(vocab_size,))
        encoder.to_device(cuda)
        decoder.to_device(cuda)
        encoder_w = encoder.param_values()[0]
        encoder_w.uniform(-0.08, 0.08)
        decoder.param_values()[0].copy_from_numpy(decoderw, offset=0)

        dense = layer.Dense('dense', vocab_size, input_sample_shape=(hiddensize,))
        dense.to_device(cuda)
        dense.param_values()[0].copy_from_numpy(densew, offset=0)
        dense.param_values()[1].copy_from_numpy(denseb, offset=0)

        metadata, idx_q, idx_a = load_data()
        idx2w = metadata['idx2w']
        batchq = idx_q[555:556]
        batcha = idx_a[555:556]

        inputs = convert(batchq, 1, 20, vocab_size, cuda)
        inputs.append(tensor.Tensor())
        inputs.append(tensor.Tensor())
        outputs = encoder.forward(model_pb2.kEval, inputs)[-2:]
        start = np.zeros((1, 1, 7000), dtype=np.float32)
Example #12
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    W0_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.0001}
    W1_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01}
    W2_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01, 'decay_mult': 250}

    b_specs = {'init': 'constant', 'value': 0, 'lr_mult': 2, 'decay_mult': 0}
    net.add(
        layer.Conv2D('conv1',
                     32,
                     5,
                     1,
                     W_specs=W0_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2,
                     input_sample_shape=(
                         3,
                         32,
                         32,
                     )))
    net.add(layer.MaxPooling2D('pool1', 3, 2, pad=1))
    net.add(layer.Activation('relu1'))
    net.add(layer.LRN(name='lrn1', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv2',
                     32,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu2'))
    net.add(layer.AvgPooling2D('pool2', 3, 2, pad=1))
    net.add(layer.LRN('lrn2', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv3',
                     64,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.AvgPooling2D('pool3', 3, 2, pad=1))
    net.add(layer.Flatten('flat'))
    net.add(
        layer.Dense('dense',
                    10,
                    W_specs=W2_specs.copy(),
                    b_specs=b_specs.copy()))
    for (p, specs) in zip(net.param_values(), net.param_specs()):
        filler = specs.filler
        if filler.type == 'gaussian':
            p.gaussian(filler.mean, filler.std)
        else:
            p.set_value(0)
        print(specs.name, filler.type, p.l1())

    return net
Example #13
    def __init__(self,
                 dev,
                 rows=28,
                 cols=28,
                 channels=1,
                 noise_size=100,
                 hidden_size=128,
                 batch=128,
                 interval=1000,
                 learning_rate=0.001,
                 epochs=1000000,
                 d_steps=3,
                 g_steps=1,
                 dataset_filepath='mnist.pkl.gz',
                 file_dir='lsgan_images/'):
        self.dev = dev
        self.rows = rows
        self.cols = cols
        self.channels = channels
        self.feature_size = self.rows * self.cols * self.channels
        self.noise_size = noise_size
        self.hidden_size = hidden_size
        self.batch = batch
        self.batch_size = self.batch // 2
        self.interval = interval
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.d_steps = d_steps
        self.g_steps = g_steps
        self.dataset_filepath = dataset_filepath
        self.file_dir = file_dir

        self.g_w0_specs = {
            'init': 'xavier',
        }
        self.g_b0_specs = {
            'init': 'constant',
            'value': 0,
        }
        self.g_w1_specs = {
            'init': 'xavier',
        }
        self.g_b1_specs = {
            'init': 'constant',
            'value': 0,
        }
        self.gen_net = ffnet.FeedForwardNet(loss.SquaredError())
        self.gen_net_fc_0 = layer.Dense(name='g_fc_0',
                                        num_output=self.hidden_size,
                                        use_bias=True,
                                        W_specs=self.g_w0_specs,
                                        b_specs=self.g_b0_specs,
                                        input_sample_shape=(self.noise_size, ))
        self.gen_net_relu_0 = layer.Activation(
            name='g_relu_0',
            mode='relu',
            input_sample_shape=(self.hidden_size, ))
        self.gen_net_fc_1 = layer.Dense(
            name='g_fc_1',
            num_output=self.feature_size,
            use_bias=True,
            W_specs=self.g_w1_specs,
            b_specs=self.g_b1_specs,
            input_sample_shape=(self.hidden_size, ))
        self.gen_net_sigmoid_1 = layer.Activation(
            name='g_sigmoid_1',
            mode='sigmoid',
            input_sample_shape=(self.feature_size, ))
        self.gen_net.add(self.gen_net_fc_0)
        self.gen_net.add(self.gen_net_relu_0)
        self.gen_net.add(self.gen_net_fc_1)
        self.gen_net.add(self.gen_net_sigmoid_1)
        for (p, specs) in zip(self.gen_net.param_values(),
                              self.gen_net.param_specs()):
            filler = specs.filler
            if filler.type == 'gaussian':
                p.gaussian(filler.mean, filler.std)
            elif filler.type == 'xavier':
                initializer.xavier(p)
            else:
                p.set_value(0)
            print(specs.name, filler.type, p.l1())
        self.gen_net.to_device(self.dev)

        self.d_w0_specs = {
            'init': 'xavier',
        }
        self.d_b0_specs = {
            'init': 'constant',
            'value': 0,
        }
        self.d_w1_specs = {
            'init': 'xavier',
        }
        self.d_b1_specs = {
            'init': 'constant',
            'value': 0,
        }
        self.dis_net = ffnet.FeedForwardNet(loss.SquaredError())
        self.dis_net_fc_0 = layer.Dense(
            name='d_fc_0',
            num_output=self.hidden_size,
            use_bias=True,
            W_specs=self.d_w0_specs,
            b_specs=self.d_b0_specs,
            input_sample_shape=(self.feature_size, ))
        self.dis_net_relu_0 = layer.Activation(
            name='d_relu_0',
            mode='relu',
            input_sample_shape=(self.hidden_size, ))
        self.dis_net_fc_1 = layer.Dense(
            name='d_fc_1',
            num_output=1,
            use_bias=True,
            W_specs=self.d_w1_specs,
            b_specs=self.d_b1_specs,
            input_sample_shape=(self.hidden_size, ))
        self.dis_net.add(self.dis_net_fc_0)
        self.dis_net.add(self.dis_net_relu_0)
        self.dis_net.add(self.dis_net_fc_1)
        for (p, specs) in zip(self.dis_net.param_values(),
                              self.dis_net.param_specs()):
            filler = specs.filler
            if filler.type == 'gaussian':
                p.gaussian(filler.mean, filler.std)
            elif filler.type == 'xavier':
                initializer.xavier(p)
            else:
                p.set_value(0)
            print(specs.name, filler.type, p.l1())
        self.dis_net.to_device(self.dev)

        self.combined_net = ffnet.FeedForwardNet(loss.SquaredError())
        for l in self.gen_net.layers:
            self.combined_net.add(l)
        for l in self.dis_net.layers:
            self.combined_net.add(l)
        self.combined_net.to_device(self.dev)
Example #14
def train(data,
          max_epoch,
          hidden_size=100,
          seq_length=100,
          batch_size=16,
          num_stacks=1,
          dropout=0.5,
          model_path='model'):
    # RMSProp with an L2-norm constraint (threshold 5) on the gradients
    opt = optimizer.RMSProp(constraint=optimizer.L2Constraint(5))
    cuda = device.create_cuda_gpu()
    rnn = layer.LSTM(name='lstm',
                     hidden_size=hidden_size,
                     num_stacks=num_stacks,
                     dropout=dropout,
                     input_sample_shape=(data.vocab_size, ))
    rnn.to_device(cuda)
    print('created rnn')
    rnn_w = rnn.param_values()[0]
    rnn_w.uniform(-0.08, 0.08)  # init all rnn parameters
    print('rnn weight l1 = %f' % rnn_w.l1())
    dense = layer.Dense('dense',
                        data.vocab_size,
                        input_sample_shape=(hidden_size, ))
    dense.to_device(cuda)
    dense_w = dense.param_values()[0]
    dense_b = dense.param_values()[1]
    print('dense w ', dense_w.shape)
    print('dense b ', dense_b.shape)
    initializer.uniform(dense_w, dense_w.shape[0], 0)
    print('dense weight l1 = %f' % dense_w.l1())
    dense_b.set_value(0)
    print('dense b l1 = %f' % dense_b.l1())

    g_dense_w = tensor.Tensor(dense_w.shape, cuda)
    g_dense_b = tensor.Tensor(dense_b.shape, cuda)

    lossfun = loss.SoftmaxCrossEntropy()
    for epoch in range(max_epoch):
        train_loss = 0
        for b in range(data.num_train_batch):
            batch = data.train_dat[b * batch_size:(b + 1) * batch_size]
            inputs, labels = convert(batch, batch_size, seq_length,
                                     data.vocab_size, cuda)
            inputs.append(tensor.Tensor())
            inputs.append(tensor.Tensor())

            outputs = rnn.forward(model_pb2.kTrain, inputs)[0:-2]
            grads = []
            batch_loss = 0
            g_dense_w.set_value(0.0)
            g_dense_b.set_value(0.0)
            for output, label in zip(outputs, labels):
                act = dense.forward(model_pb2.kTrain, output)
                lvalue = lossfun.forward(model_pb2.kTrain, act, label)
                batch_loss += lvalue.l1()
                grad = lossfun.backward()
                grad /= batch_size
                grad, gwb = dense.backward(model_pb2.kTrain, grad)
                grads.append(grad)
                g_dense_w += gwb[0]
                g_dense_b += gwb[1]
                # print output.l1(), act.l1()
            utils.update_progress(
                b * 1.0 / data.num_train_batch,
                'training loss = %f' % (batch_loss / seq_length))
            train_loss += batch_loss

            grads.append(tensor.Tensor())
            grads.append(tensor.Tensor())
            g_rnn_w = rnn.backward(model_pb2.kTrain, grads)[1][0]
            dense_w, dense_b = dense.param_values()
            opt.apply_with_lr(epoch, get_lr(epoch), g_rnn_w, rnn_w, 'rnnw')
            opt.apply_with_lr(epoch, get_lr(epoch), g_dense_w, dense_w,
                              'dense_w')
            opt.apply_with_lr(epoch, get_lr(epoch), g_dense_b, dense_b,
                              'dense_b')
        print('\nEpoch %d, train loss is %f' %
              (epoch, train_loss / data.num_train_batch / seq_length))

        eval_loss = 0
        for b in range(data.num_test_batch):
            batch = data.val_dat[b * batch_size:(b + 1) * batch_size]
            inputs, labels = convert(batch, batch_size, seq_length,
                                     data.vocab_size, cuda)
            inputs.append(tensor.Tensor())
            inputs.append(tensor.Tensor())
            outputs = rnn.forward(model_pb2.kEval, inputs)[0:-2]
            for output, label in zip(outputs, labels):
                output = dense.forward(model_pb2.kEval, output)
                eval_loss += lossfun.forward(model_pb2.kEval, output,
                                             label).l1()
        print('Epoch %d, evaluation loss is %f' %
              (epoch, eval_loss / data.num_test_batch / seq_length))

        if (epoch + 1) % 30 == 0:
            # checkpoint the file model
            with open('%s_%d.bin' % (model_path, epoch), 'wb') as fd:
                print('saving model to %s' % model_path)
                d = {}
                for name, w in zip(['rnn_w', 'dense_w', 'dense_b'],
                                   [rnn_w, dense_w, dense_b]):
                    w.to_host()
                    d[name] = tensor.to_numpy(w)
                    w.to_device(cuda)
                d['idx_to_char'] = data.idx_to_char
                d['char_to_idx'] = data.char_to_idx
                d['hidden_size'] = hidden_size
                d['num_stacks'] = num_stacks
                d['dropout'] = dropout

                pickle.dump(d, fd)
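
A hedged driver for the function above (assumption: a `data` object exposing vocab_size, train_dat, num_train_batch, val_dat, num_test_batch, idx_to_char and char_to_idx, exactly the attributes train() reads; with these defaults, checkpoints land in 'model_29.bin', 'model_59.bin', ...):

train(data, max_epoch=30, hidden_size=100, seq_length=100,
      batch_size=16, model_path='model')
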
Example #15
def sample(model_path, nsamples=100, seed_text='', do_sample=True):
    with open(model_path, 'rb') as fd:
        d = pickle.load(fd)
        rnn_w = tensor.from_numpy(d['rnn_w'])
        idx_to_char = d['idx_to_char']
        char_to_idx = d['char_to_idx']
        vocab_size = len(idx_to_char)
        dense_w = tensor.from_numpy(d['dense_w'])
        dense_b = tensor.from_numpy(d['dense_b'])
        hidden_size = d['hidden_size']
        num_stacks = d['num_stacks']
        dropout = d['dropout']

    cuda = device.create_cuda_gpu()
    rnn = layer.LSTM(name='lstm',
                     hidden_size=hidden_size,
                     num_stacks=num_stacks,
                     dropout=dropout,
                     input_sample_shape=(len(idx_to_char), ))
    rnn.to_device(cuda)
    rnn.param_values()[0].copy_data(rnn_w)
    dense = layer.Dense('dense',
                        vocab_size,
                        input_sample_shape=(hidden_size, ))
    dense.to_device(cuda)
    dense.param_values()[0].copy_data(dense_w)
    dense.param_values()[1].copy_data(dense_b)
    hx = tensor.Tensor((num_stacks, 1, hidden_size), cuda)
    cx = tensor.Tensor((num_stacks, 1, hidden_size), cuda)
    hx.set_value(0.0)
    cx.set_value(0.0)
    if len(seed_text) > 0:
        for c in seed_text:
            x = np.zeros((1, vocab_size), dtype=np.float32)
            x[0, char_to_idx[c]] = 1
            tx = tensor.from_numpy(x)
            tx.to_device(cuda)
            inputs = [tx, hx, cx]
            outputs = rnn.forward(False, inputs)
            y = dense.forward(False, outputs[0])
            y = tensor.softmax(y)
            hx = outputs[1]
            cx = outputs[2]
        sys.stdout.write(seed_text)
    else:
        y = tensor.Tensor((1, vocab_size), cuda)
        y.set_value(1.0 / vocab_size)

    for i in range(nsamples):
        y.to_host()
        prob = tensor.to_numpy(y)[0]
        if do_sample:
            cur = np.random.choice(vocab_size, 1, p=prob)[0]
        else:
            cur = np.argmax(prob)
        sys.stdout.write(idx_to_char[cur])
        x = np.zeros((1, vocab_size), dtype=np.float32)
        x[0, cur] = 1
        tx = tensor.from_numpy(x)
        tx.to_device(cuda)
        inputs = [tx, hx, cx]
        outputs = rnn.forward(False, inputs)
        y = dense.forward(False, outputs[0])
        y = tensor.softmax(y)
        hx = outputs[1]
        cx = outputs[2]
    print('')
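
Hedged usage, pairing this with the checkpoints written by train() in Example #14 (assumption: the first checkpoint of a 30-epoch run with the default model_path is 'model_29.bin', per the '%s_%d.bin' naming scheme there):

sample('model_29.bin', nsamples=200, seed_text='The ', do_sample=True)
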
Example #16
 def test_dense(self):
     dense = layer.Dense('ip', 32, input_sample_shape=(64, ))
     out_sample_shape = dense.get_output_sample_shape()
     self.check_shape(out_sample_shape, (32, ))
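
A small companion sketch, not part of the original test (assumptions: parameters are set before the first forward call, Dense follows the layer.forward(flag, tensor) convention used in the other examples, and the batch size of 8 is arbitrary):

dense = layer.Dense('ip', 32, input_sample_shape=(64, ))
for p in dense.param_values():
    p.set_value(0.1)            # constant init, as in the save/load test
x = tensor.Tensor((8, 64))
x.gaussian(0, 0.01)             # random input batch
y = dense.forward(model_pb2.kEval, x)
print(y.shape)                  # expected: (8, 32)
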
Example #17
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    ConvBnReLU(net, 'conv1_1', 64, input_shape)
    #net.add(layer.Dropout('drop1', 0.3))
    net.add(layer.MaxPooling2D('pool0', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv1_2', 128)
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv2_1', 128)
    net.add(layer.Dropout('drop2_1', 0.4))
    ConvBnReLU(net, 'conv2_2', 128)
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv3_1', 256)
    net.add(layer.Dropout('drop3_1', 0.4))
    ConvBnReLU(net, 'conv3_2', 256)
    net.add(layer.Dropout('drop3_2', 0.4))
    ConvBnReLU(net, 'conv3_3', 256)
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv4_1', 256)
    net.add(layer.Dropout('drop4_1', 0.4))
    ConvBnReLU(net, 'conv4_2', 256)
    net.add(layer.Dropout('drop4_2', 0.4))
    ConvBnReLU(net, 'conv4_3', 256)
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv5_1', 512)
    net.add(layer.Dropout('drop5_1', 0.4))
    ConvBnReLU(net, 'conv5_2', 512)
    net.add(layer.Dropout('drop5_2', 0.4))
    ConvBnReLU(net, 'conv5_3', 512)
    net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))
    #ConvBnReLU(net, 'conv6_1', 512)
    #net.add(layer.Dropout('drop6_1', 0.4))
    #ConvBnReLU(net, 'conv6_2', 512)
    #net.add(layer.Dropout('drop6_2', 0.4))
    #ConvBnReLU(net, 'conv6_3', 512)
    #net.add(layer.MaxPooling2D('pool6', 2, 2, border_mode='valid'))
    #ConvBnReLU(net, 'conv7_1', 512)
    #net.add(layer.Dropout('drop7_1', 0.4))
    #ConvBnReLU(net, 'conv7_2', 512)
    #net.add(layer.Dropout('drop7_2', 0.4))
    #ConvBnReLU(net, 'conv7_3', 512)
    #net.add(layer.MaxPooling2D('pool7', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))

    net.add(layer.Dense('ip1', 256))
    net.add(layer.BatchNormalization('bn1'))
    net.add(layer.Activation('relu1'))
    net.add(layer.Dropout('dropout1', 0.2))

    net.add(layer.Dense('ip2', 16))
    net.add(layer.BatchNormalization('bn2'))
    net.add(layer.Activation('relu2'))
    net.add(layer.Dropout('dropout2', 0.2))

    net.add(layer.Dense('ip3', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net