Exemple #1
0
def model():
    """Build a two-input / two-output graph trained with a summed MSE loss.

    Returns:
        (y1, y2, o1, o2, optimizer): the two float32 placeholders, the two
        graph outputs, and the Adam op minimising the total MSE.
    """
    dim_a = 50
    dim_b = 100
    lr = 0.01

    y1 = tf.placeholder('float32', [None, dim_a])
    y2 = tf.placeholder('float32', [None, dim_b])
    in_a = StartNode(input_vars=[y1])
    in_b = StartNode(input_vars=[y2])

    # Merge both inputs by concatenation, then project down to dim_b.
    merged = HiddenNode(prev=[in_a, in_b],
                        input_merge_mode=Concat(),
                        layers=[Linear(dim_a + dim_b, dim_b), RELU()])
    # Side branch on the second input alone.
    branch = HiddenNode(prev=[in_b], layers=[Linear(dim_b, dim_b), RELU()])
    # Sum the two paths and project to dim_a.
    fused = HiddenNode(prev=[merged, branch],
                       input_merge_mode=Sum(),
                       layers=[Linear(dim_b, dim_a), RELU()])
    out_a = EndNode(prev=[fused])
    out_b = EndNode(prev=[branch])

    graph = Graph(start=[in_a, in_b], end=[out_a, out_b])
    o1, o2 = graph.train_fprop()

    # Total loss: sum of per-output mean squared errors.
    mse = tf.reduce_mean((y1 - o1)**2) + tf.reduce_mean((y2 - o2)**2)
    optimizer = tf.train.AdamOptimizer(lr).minimize(mse)
    return y1, y2, o1, o2, optimizer
Exemple #2
0
def model():
    """Build the MNIST CNN: two conv/pool stages followed by an MLP head."""
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()
        stack = [
            # Stage 1: 32 filters, then 2x2 max-pool and LRN.
            Conv2D(num_filters=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
            BatchNormalization(),
            RELU(),
            MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
            LRN(),
            # Stage 2: 64 filters, then 2x2 max-pool and LRN.
            Conv2D(num_filters=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
            BatchNormalization(),
            RELU(),
            MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
            LRN(),
            # Fully-connected head: 128 -> 256 -> 10-way softmax.
            Flatten(),
            Linear(128),
            BatchNormalization(),
            Tanh(),
            Dropout(0.8),
            Linear(256),
            BatchNormalization(),
            Tanh(),
            Dropout(0.8),
            Linear(10),
            Softmax(),
        ]
        for layer in stack:
            seq.add(layer)
    return seq
Exemple #3
0
def model3D(img=(84, 256, 256)):
    """Build a 3D encoder/decoder segmentation network under scope 'WMH'.

    Args:
        img: (depth, height, width) of the input volume; also used as the
            output_shape of the final transposed convolution.

    Returns:
        A tg.Sequential whose last layer is a 2-channel Softmax
        (per-voxel two-class scores).
    """
    with tf.name_scope('WMH'):
        seq = tg.Sequential()
        convStride = (1, 1, 1)
        poolStride = (2, 2, 2)
        seq.add(
            Conv3D(input_channels=1,
                   num_filters=10,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b1'))
        seq.add(
            MaxPool3D(poolsize=(2, 2, 2), stride=poolStride, padding='SAME'))
        # Spatial size after pooling; reused as the output_shape of the first
        # transposed conv so the decoder mirrors the encoder.
        layerSize1 = updateConvLayerSize(img, poolStride)
        #print("layer1: "+str(layerSize1))
        seq.add(RELU())
        seq.add(
            Conv3D(input_channels=10,
                   num_filters=20,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        seq.add(TFBatchNormalization(name='b2'))
        #layerSize2 = updateConvLayerSize(layerSize1,convStride)
        #print("layer1: "+str(layerSize2))
        seq.add(RELU())
        # Decoder: two transposed convolutions upsample back towards `img`.
        seq.add(
            Conv3D_Tranpose1(input_channels=20,
                             num_filters=10,
                             output_shape=layerSize1,
                             kernel_size=(3, 3, 3),
                             stride=convStride,
                             padding='SAME'))
        seq.add(RELU())
        seq.add(
            Conv3D_Tranpose1(input_channels=10,
                             num_filters=3,
                             output_shape=img,
                             kernel_size=(3, 3, 3),
                             stride=(2, 2, 2),
                             padding='SAME'))
        ##
        seq.add(RELU())
        # Final 3 -> 2 channel conv produces the two-class voxel scores.
        seq.add(
            Conv3D(input_channels=3,
                   num_filters=2,
                   kernel_size=(3, 3, 3),
                   stride=convStride,
                   padding='SAME'))
        ##
        seq.add(Softmax())
        #seq.add(Sigmoid())
    return seq
Exemple #4
0
def model(word_len, sent_len, nclass):
    """Character-level CNN sentence classifier.

    Args:
        word_len: number of characters per word.
        sent_len: number of words per sentence.
        nclass: number of output classes.

    Returns:
        (X_ph, y_train_sb, y_test_sb): the int32 input placeholder of shape
        [batch, sent_len, word_len] and the train/test output tensors
        (per-word class scores summed over the sentence, then softmaxed).
    """
    # Character vocabulary size and embedding width.
    unicode_size = 1000
    ch_embed_dim = 20

    # Track (h, w) through the three VALID convolutions below so the final
    # Linear layer can be sized to the flattened conv output.
    h, w = valid(ch_embed_dim,
                 word_len,
                 stride=(1, 1),
                 kernel_size=(ch_embed_dim, 5))
    h, w = valid(h, w, stride=(1, 1), kernel_size=(1, 5))
    h, w = valid(h, w, stride=(1, 2), kernel_size=(1, 5))
    conv_out_dim = int(h * w * 60)

    X_ph = tf.placeholder('int32', [None, sent_len, word_len])
    input_sn = tg.StartNode(input_vars=[X_ph])
    # Embed characters, run a 3-layer char-CNN per word, classify each word,
    # then reduce over words and softmax per sentence.
    charcnn_hn = tg.HiddenNode(prev=[input_sn],
                               layers=[
                                   Reshape(shape=(-1, word_len)),
                                   Embedding(cat_dim=unicode_size,
                                             encode_dim=ch_embed_dim,
                                             zero_pad=True),
                                   Reshape(shape=(-1, ch_embed_dim, word_len,
                                                  1)),
                                   Conv2D(input_channels=1,
                                          num_filters=20,
                                          padding='VALID',
                                          kernel_size=(ch_embed_dim, 5),
                                          stride=(1, 1)),
                                   RELU(),
                                   Conv2D(input_channels=20,
                                          num_filters=40,
                                          padding='VALID',
                                          kernel_size=(1, 5),
                                          stride=(1, 1)),
                                   RELU(),
                                   Conv2D(input_channels=40,
                                          num_filters=60,
                                          padding='VALID',
                                          kernel_size=(1, 5),
                                          stride=(1, 2)),
                                   RELU(),
                                   Flatten(),
                                   Linear(conv_out_dim, nclass),
                                   Reshape((-1, sent_len, nclass)),
                                   ReduceSum(1),
                                   Softmax()
                               ])

    output_en = tg.EndNode(prev=[charcnn_hn])
    graph = tg.Graph(start=[input_sn], end=[output_en])
    y_train_sb = graph.train_fprop()[0]
    y_test_sb = graph.test_fprop()[0]

    return X_ph, y_train_sb, y_test_sb
Exemple #5
0
    def __init__(self, nclass, h, w, c):
        """Identity-block + dense-block classifier; stores start/end nodes.

        Args:
            nclass: number of output classes.
            h, w, c: input height, width and channel count.
        """
        net = []

        # Channel-preserving identity block on the raw input.
        net.append(IdentityBlock(input_channels=c,
                                 input_shape=[h, w],
                                 nlayers=10))

        # c -> 16 channels, same spatial size.
        net.append(Conv2D(input_channels=c,
                          num_filters=16,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          padding='SAME'))
        net.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(3, 3))
        net.append(BatchNormalization(input_shape=[h, w, 16]))

        # Dense block grows the channel count by growth_rate per layer.
        dense = DenseBlock(input_channels=16,
                           input_shape=[h, w],
                           growth_rate=4,
                           nlayers=4)
        net.append(dense)

        # Downsample by 2 while mapping to 32 channels.
        net.append(Conv2D(input_channels=dense.output_channels,
                          num_filters=32,
                          kernel_size=(3, 3),
                          stride=(2, 2),
                          padding='SAME'))
        net.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(2, 2), kernel_size=(3, 3))
        net.append(Dropout(0.5))

        # 1x1 conv to nclass channels.
        net.append(Conv2D(input_channels=32,
                          num_filters=nclass,
                          kernel_size=(1, 1),
                          stride=(1, 1),
                          padding='SAME'))
        net.append(RELU())
        h, w = same(in_height=h, in_width=w, stride=(1, 1), kernel_size=(1, 1))
        net.append(BatchNormalization(input_shape=[h, w, nclass]))

        # Global average pool down to 1x1, then flatten to class scores.
        net.append(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        net.append(Flatten())
        net.append(Softmax())

        self.startnode = tg.StartNode(input_vars=[None])
        hidden = tg.HiddenNode(prev=[self.startnode], layers=net)
        self.endnode = tg.EndNode(prev=[hidden])
Exemple #6
0
def model(nclass, h, w, c):
    """All-convolutional CIFAR-10 network.

    Nine conv layers alternating batch-normalisation and dropout, followed
    by global average pooling over the remaining (h, w) map, flatten and
    softmax.

    Args:
        nclass: number of output classes.
        h, w, c: input height, width and channel count.

    Returns:
        The assembled tg.Sequential.
    """
    # (in_channels, num_filters, kernel, stride, batch-norm name or None).
    # A None name means the conv is followed by Dropout(0.5) instead of BN.
    conv_specs = [
        (c,   96,     (3, 3), (1, 1), 'b1'),
        (96,  96,     (3, 3), (1, 1), None),
        (96,  96,     (3, 3), (2, 2), 'b3'),
        (96,  192,    (3, 3), (1, 1), None),
        (192, 192,    (3, 3), (1, 1), 'b5'),
        (192, 192,    (3, 3), (2, 2), None),
        (192, 192,    (3, 3), (1, 1), 'b7'),
        (192, 192,    (1, 1), (1, 1), None),
        (192, nclass, (1, 1), (1, 1), 'b9'),
    ]
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()
        for in_ch, n_filters, kernel, stride, bn_name in conv_specs:
            seq.add(Conv2D(input_channels=in_ch, num_filters=n_filters,
                           kernel_size=kernel, stride=stride, padding='SAME'))
            seq.add(RELU())
            if bn_name is not None:
                seq.add(TFBatchNormalization(name=bn_name))
            else:
                seq.add(Dropout(0.5))
            # Track the spatial size so the final pooling covers the full map.
            h, w = same(in_height=h, in_width=w, stride=stride,
                        kernel_size=kernel)

        seq.add(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
    def __init__(self, h, w, c):
        """Two identical conv branches from one input, merged by summation.

        Args:
            h, w, c: input height, width and channel count.
        """

        def branch_layers():
            # conv -> batch-norm -> relu, preserving the spatial size.
            return [Conv2D(input_channels=c, num_filters=1,
                           kernel_size=(2, 2), stride=(1, 1), padding='SAME'),
                    BatchNormalization(input_shape=[h, w, 1]),
                    RELU()]

        left = branch_layers()
        right = branch_layers()

        self.startnode = tg.StartNode(input_vars=[None])
        left_hn = tg.HiddenNode(prev=[self.startnode], layers=left)
        right_hn = tg.HiddenNode(prev=[self.startnode], layers=right)
        merged_hn = tg.HiddenNode(prev=[left_hn, right_hn],
                                  input_merge_mode=Sum())
        self.endnode = tg.EndNode(prev=[merged_hn])
Exemple #8
0
def model():
    """MNIST CNN with explicit shape tracking for BatchNormalization layers.

    Starts from a 28x28 single-channel image; two conv/pool stages feed a
    128 -> 256 -> 10 fully-connected head ending in a softmax.
    """
    with tf.name_scope('MnistCNN'):
        seq = tg.Sequential()

        # Stage 1: 1 -> 32 channels, then 2x2 max-pool and LRN.
        seq.add(Conv2D(input_channels=1, num_filters=32,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        height, width = same(in_height=28, in_width=28,
                             stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[height, width, 32]))
        seq.add(RELU())
        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        height, width = same(in_height=height, in_width=width,
                             stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())

        # Stage 2: 32 -> 64 channels, then 2x2 max-pool and LRN.
        seq.add(Conv2D(input_channels=32, num_filters=64,
                       kernel_size=(3, 3), stride=(1, 1), padding='SAME'))
        height, width = same(in_height=height, in_width=width,
                             stride=(1, 1), kernel_size=(3, 3))
        seq.add(BatchNormalization(input_shape=[height, width, 64]))
        seq.add(RELU())
        seq.add(MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'))
        height, width = same(in_height=height, in_width=width,
                             stride=(2, 2), kernel_size=(2, 2))
        seq.add(LRN())

        # Fully-connected head sized from the tracked spatial dimensions.
        seq.add(Flatten())
        seq.add(Linear(int(height * width * 64), 128))
        for dim_in, dim_out in ((128, 256), (256, 10)):
            seq.add(BatchNormalization(input_shape=[dim_in]))
            seq.add(Tanh())
            seq.add(Dropout(0.8))
            seq.add(Linear(dim_in, dim_out))
        seq.add(Softmax())
    return seq
Exemple #9
0
 def __init__(self, num_iter):
     """Store the iteration count and build a 4 -> 8 -> 1 channel conv stack.

     Args:
         num_iter: iteration count kept on the instance; presumably the
             number of times the stack is applied — confirm against callers.
     """
     self.num_iter = num_iter
     self.layers = [
         Conv2D(input_channels=4,
                num_filters=8,
                kernel_size=(5, 5),
                stride=(1, 1),
                padding='SAME'),
         BatchNormalization(layer_type='conv', dim=8, short_memory=0.01),
         RELU(),
         Conv2D(input_channels=8,
                num_filters=1,
                kernel_size=(5, 5),
                stride=(1, 1),
                padding='SAME'),
         RELU(),
     ]
Exemple #10
0
 def __init__(self, num_blocks):
     """Build `num_blocks` blocks of three single-channel 5x5x5 Conv3D+RELU
     layers, then append a final one-layer 3x3x3 conv block.

     Args:
         num_blocks: number of three-conv blocks to create.
     """
     self.num_blocks = num_blocks
     # Each entry of self.blocks is a list of layers forming one block.
     self.blocks = []
     for _ in range(self.num_blocks):
         layers = []
         layers.append(
             Conv3D(input_channels=1,
                    num_filters=1,
                    kernel_size=(5, 5, 5),
                    stride=(1, 1, 1),
                    padding='SAME'))
         layers.append(RELU())
         # layers.append(BatchNormalization(layer_type='conv', dim=8, short_memory=0.01))
         # layers.append(Conv3D(input_channels=16, num_filters=16, kernel_size=(5,5,5), stride=(1,1,1), padding='SAME'))
         # layers.append(RELU())
         # layers.append(BatchNormalization(layer_type='conv', dim=8, short_memory=0.01))
         layers.append(
             Conv3D(input_channels=1,
                    num_filters=1,
                    kernel_size=(5, 5, 5),
                    stride=(1, 1, 1),
                    padding='SAME'))
         layers.append(RELU())
         # layers.append(BatchNormalization(layer_type='conv', dim=8, short_memory=0.01))
         layers.append(
             Conv3D(input_channels=1,
                    num_filters=1,
                    kernel_size=(5, 5, 5),
                    stride=(1, 1, 1),
                    padding='SAME'))
         layers.append(RELU())
         self.blocks.append(layers)
     # Final block: a single 3x3x3 conv with no activation.
     self.blocks.append([
         Conv3D(input_channels=1,
                num_filters=1,
                kernel_size=(3, 3, 3),
                stride=(1, 1, 1),
                padding='SAME')
     ])
    def __init__(self, nclass, h, w, c):
        """Classifier head: template model -> flatten -> 200-unit MLP -> softmax.

        Args:
            nclass: number of output classes.
            h, w, c: input height, width and channel count, forwarded to
                TemplateModel.
        """
        template = TemplateModel(nclass, h, w, c)
        head = [
            template,
            Flatten(),
            Linear(template.output_dim, 200),
            RELU(),
            Linear(200, nclass),
            Softmax(),
        ]

        self.startnode = tg.StartNode(input_vars=[None])
        hidden = tg.HiddenNode(prev=[self.startnode], layers=head)
        self.endnode = tg.EndNode(prev=[hidden])
Exemple #12
0
 def __init__(self, input, num_blocks, kernel=(3, 3, 3)):
     """Create `num_blocks` blocks of two channel-preserving Conv3D layers.

     Args:
         input: channel count (parameter name shadows the builtin; kept
             for interface compatibility).
         num_blocks: number of blocks to build.
         kernel: 3D kernel size shared by every conv.
     """
     self.num_blocks = num_blocks
     self.input = input
     self.kernel = kernel

     def make_conv():
         # Channel-preserving 3D convolution with unit stride.
         return Conv3D(input_channels=self.input,
                       num_filters=self.input,
                       kernel_size=self.kernel,
                       stride=(1, 1, 1),
                       padding='SAME')

     self.blocks = [[make_conv(), RELU(), make_conv()]
                    for _ in range(self.num_blocks)]
Exemple #13
0
 def __init__(self, input, BN_name, kernel=(3, 3, 3), iterate=1):
     """Two conv + batch-norm layers with a RELU between the pairs.

     Args:
         input: channel count (parameter name shadows the builtin; kept
             for interface compatibility).
         BN_name: prefix for the batch-norm layer names; a running counter
             (self.int_) is appended to keep them unique.
         kernel: 3D kernel size shared by both convs.
         iterate: stored on the instance; not used in this constructor.
     """
     self.layers = []
     self.int_ = 0
     self.input = input
     self.kernel = kernel
     self.iterate = iterate
     for _ in range(2):
         self.layers.append(
             Conv3D(input_channels=input,
                    num_filters=input,
                    kernel_size=kernel,
                    stride=(1, 1, 1),
                    padding='SAME'))
         self.layers.append(TFBatchNormalization(BN_name + str(self.int_)))
         self.int_ += 1
         # RELU only between the two conv/BN pairs, matching the unrolled form.
         if self.int_ == 1:
             self.layers.append(RELU())
Exemple #14
0
def Vanilla_Classifier(X_train, y_train, X_valid, y_valid, restore):
    """Train a small fully-connected classifier and report the best accuracy.

    Trains with MSE on softmax outputs using Adam for 100 epochs,
    checkpointing to './var/2/model.ckpt' after every epoch.

    Args:
        X_train, y_train: training images (N, h, w, c) and one-hot labels.
        X_valid, y_valid: validation images and one-hot labels.
        restore: if 1, restore weights from the checkpoint before training.
    """
    batchsize = 100
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape

    g = tf.Graph()
    with g.as_default():

        data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize)

        X_ph = tf.placeholder('float32', [None, h, w, c])
        # One label placeholder per output component (single component here).
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))

        dim = int(h * w * c)
        scope = 'encoder'
        start = tg.StartNode(input_vars=[X_ph])
        # Sigmoid/BN/RELU feature stack, flattened for the linear classifier.
        h1_Node = tg.HiddenNode(prev=[start],
                                layers=[Sigmoid(),
                                        TFBatchNormalization(name=scope + '/vanilla1'),
                                        RELU(),
                                        Flatten(),
                                        Sigmoid(),
                                        TFBatchNormalization(name=scope + '/vanilla2')])

        h2_Node = tg.HiddenNode(prev=[h1_Node],
                                layers=[Linear(prev_dim=dim, this_dim=nclass),
                                        Softmax()])
        end_nodes = [tg.EndNode(prev=[h2_Node])]

        graph = Graph(start=[start], end=end_nodes)

        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        # MSE between one-hot labels and softmax outputs, summed per output.
        ttl_mse = []
        for y_ph, out in zip(y_phs, train_outs_sb):
            ttl_mse.append(tf.reduce_mean((y_ph - out)**2))

        mse = sum(ttl_mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        saver = tf.train.Saver()
        vardir = './var/2'
        if not os.path.exists(vardir):
            os.makedirs(vardir)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        tf.set_random_seed(1)
        init = tf.global_variables_initializer()

        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            sess.run(init)
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")
            max_epoch = 100
            temp_acc = []

            for epoch in range(max_epoch):

                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch

                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch

                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                # Checkpoint after every epoch and track validation accuracy.
                save_path = saver.save(sess, vardir + "/model.ckpt")
                temp_acc.append(valid_accuracy / float(ttl_examples))
            # BUG FIX: was a Python 2 print statement (SyntaxError under
            # Python 3); the rest of this function uses the print() function.
            print('max accuracy is:\t', max(temp_acc))
Exemple #15
0
def Encoder_Classifier(X_train, y_train, X_valid, y_valid, restore):
    """Train a conv-encoder + linear classifier and report the best accuracy.

    A three-conv VALID-padded encoder compresses the image to a 300-dim
    tanh bottleneck; a linear + softmax head classifies it. Trains with MSE
    on softmax outputs using Adam for 2 epochs and checkpoints to
    './var/1/model.ckpt' once after training.

    Args:
        X_train, y_train: training images (N, h, w, c) and one-hot labels.
        X_valid, y_valid: validation images and one-hot labels.
        restore: if 1, restore weights from the checkpoint before training.
    """
    batchsize = 100
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape

    g = tf.Graph()
    with g.as_default():

        data_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid, y_valid, batchsize=batchsize)

        X_ph = tf.placeholder('float32', [None, h, w, c])

        # One label placeholder per output component (single component here).
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))

        start = tg.StartNode(input_vars=[X_ph])

        # Track the spatial size through the three VALID convolutions so the
        # flattened encoder output can be sized for the first Linear layer.
        h1, w1 = valid(h, w, filters=(5, 5), strides=(1, 1))
        h2, w2 = valid(h1, w1, filters=(5, 5), strides=(2, 2))
        h3, w3 = valid(h2, w2, filters=(5, 5), strides=(2, 2))
        flat_dim = int(h3 * w3 * 32)
        scope = 'encoder'
        bottleneck_dim = 300
        enc_hn = tg.HiddenNode(prev=[start],
                               layers=[Conv2D(input_channels=c, num_filters=32, kernel_size=(5, 5), stride=(1, 1), padding='VALID'),
                                       TFBatchNormalization(name=scope + '/genc1'),
                                       RELU(),
                                       Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                                       TFBatchNormalization(name=scope + '/genc2'),
                                       RELU(),
                                       Conv2D(input_channels=32, num_filters=32, kernel_size=(5, 5), stride=(2, 2), padding='VALID'),
                                       TFBatchNormalization(name=scope + '/genc3'),
                                       RELU(),
                                       Flatten(),
                                       Linear(flat_dim, 300),
                                       TFBatchNormalization(name=scope + '/genc4'),
                                       RELU(),
                                       Linear(300, bottleneck_dim),
                                       Tanh()
                                       ])

        h2_Node = tg.HiddenNode(prev=[enc_hn],
                                layers=[Linear(prev_dim=bottleneck_dim, this_dim=nclass),
                                        Softmax()])

        end_nodes = [tg.EndNode(prev=[h2_Node])]

        graph = Graph(start=[start], end=end_nodes)

        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        # MSE between one-hot labels and softmax outputs, summed per output.
        ttl_mse = []
        for y_ph, out in zip(y_phs, train_outs_sb):
            ttl_mse.append(tf.reduce_mean((y_ph - out)**2))

        mse = sum(ttl_mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)

        saver = tf.train.Saver()
        vardir = './var/1'
        if not os.path.exists(vardir):
            os.makedirs(vardir)

        tf.set_random_seed(1)
        init = tf.global_variables_initializer()

        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            sess.run(init)
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")

            max_epoch = 2
            temp_acc = []
            for epoch in range(max_epoch):
                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch

                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                temp_acc.append(valid_accuracy / float(ttl_examples))
            # Checkpoint once after all epochs complete.
            save_path = saver.save(sess, vardir + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
            # BUG FIX: was a Python 2 print statement (SyntaxError under
            # Python 3); the rest of this function uses the print() function.
            print('max accuracy is:\t', max(temp_acc))
Exemple #16
0
    def __init__(self, nclass, h, w, c):
        """All-convolutional classifier: nine convs alternating batch-norm
        and dropout, then global average pooling, flatten and softmax.

        Args:
            nclass: number of output classes.
            h, w, c: input height, width and channel count.
        """
        # (in_channels, num_filters, kernel, stride, use_batch_norm);
        # use_batch_norm=False means the conv is followed by Dropout(0.5).
        specs = [
            (c,   96,     (3, 3), (1, 1), True),
            (96,  96,     (3, 3), (1, 1), False),
            (96,  96,     (3, 3), (2, 2), True),
            (96,  192,    (3, 3), (1, 1), False),
            (192, 192,    (3, 3), (1, 1), True),
            (192, 192,    (3, 3), (2, 2), False),
            (192, 192,    (3, 3), (1, 1), True),
            (192, 192,    (1, 1), (1, 1), False),
            (192, nclass, (1, 1), (1, 1), True),
        ]
        net = []
        for in_ch, n_filters, kernel, stride, use_bn in specs:
            net.append(Conv2D(input_channels=in_ch, num_filters=n_filters,
                              kernel_size=kernel, stride=stride,
                              padding='SAME'))
            net.append(RELU())
            # Track spatial size so the BN shapes and final pool are correct.
            h, w = same(in_height=h, in_width=w, stride=stride,
                        kernel_size=kernel)
            if use_bn:
                net.append(BatchNormalization(input_shape=[h, w, n_filters]))
            else:
                net.append(Dropout(0.5))

        # Global average pool over the remaining map, then class scores.
        net.append(AvgPooling(poolsize=(h, w), stride=(1, 1), padding='VALID'))
        net.append(Flatten())
        net.append(Softmax())
        self.startnode = tg.StartNode(input_vars=[None])
        model_hn = tg.HiddenNode(prev=[self.startnode], layers=net)
        self.endnode = tg.EndNode(prev=[model_hn])
Exemple #17
0
def model_Inception_Resnet(img=(84, 256, 256)):
    """Build a 3D Inception-Resnet encoder/decoder for WMH segmentation.

    Args:
        img: (depth, height, width) of the input volume; the decoder
            upsamples back to this shape.

    Returns:
        A tensorgraph Sequential model mapping a 1-channel volume to a
        3-channel softmax (background / white matter / others).
    """
    with tf.name_scope('WMH'):
        net = tg.Sequential()
        unit_stride = (1, 1, 1)
        down_stride = (2, 2, 2)
        k3 = (3, 3, 3)

        def conv(cin, cout, ksize=k3):
            # SAME-padded, stride-1 3D convolution.
            return Conv3D(input_channels=cin, num_filters=cout,
                          kernel_size=ksize, stride=unit_stride,
                          padding='SAME')

        def up(cin, cout, shape):
            # SAME-padded, stride-2 transposed 3D convolution that
            # upsamples to the given output shape.
            return Conv3D_Tranpose1(input_channels=cin, num_filters=cout,
                                    output_shape=shape, kernel_size=k3,
                                    stride=down_stride, padding='SAME')

        # ---- encoder ----
        net.add(conv(1, 8, ksize=(5, 5, 5)))
        net.add(MaxPool3D(poolsize=(2, 2, 2), stride=down_stride,
                          padding='SAME'))
        half_size = updateConvLayerSize(img, down_stride)
        net.add(RELU())

        net.add(InceptionResnet_3D(8, type='v2_out8'))

        net.add(conv(8, 16))
        net.add(MaxPool3D(poolsize=(2, 2, 2), stride=down_stride,
                          padding='SAME'))
        quarter_size = updateConvLayerSize(half_size, down_stride)
        net.add(RELU())

        net.add(InceptionResnet_3D(16, type='v1_out16'))
        net.add(conv(16, 16))
        net.add(RELU())

        net.add(conv(16, 32))
        net.add(MaxPool3D(poolsize=(2, 2, 2), stride=down_stride,
                          padding='SAME'))
        net.add(RELU())

        # ---- decoder ----
        net.add(InceptionResnet_3D(32, type='v1_out16'))
        net.add(up(16, 16, quarter_size))
        net.add(RELU())

        net.add(InceptionResnet_3D(16, type='v1_out16'))
        net.add(conv(16, 16))
        net.add(RELU())

        net.add(up(16, 8, half_size))
        net.add(RELU())

        net.add(InceptionResnet_3D(8, type='v2_out8'))
        net.add(conv(8, 8))
        net.add(RELU())

        # num_filters=3 --> Background, WhiteMatter, Others
        net.add(up(8, 3, img))
        net.add(RELU())
        net.add(conv(3, 3, ksize=(1, 1, 1)))
        net.add(Softmax())
    return net
Exemple #18
0
    def __init__(self, h, w, c, z_dim=100, gf_dim=64, df_dim=64):
        """Build DCGAN-style generator and discriminator layer stacks.

        Args:
            h, w, c: image height, width and channel count.
            z_dim: dimensionality of the latent noise vector.
            gf_dim: base number of generator filters.
            df_dim: base number of discriminator filters.
        """
        self.z_dim = z_dim

        def deconv(cin, cout, shape):
            # 5x5, stride-2, SAME-padded transposed convolution.
            return Conv2D_Transpose(input_channels=cin,
                                    num_filters=cout,
                                    output_shape=shape,
                                    kernel_size=(5, 5),
                                    stride=(2, 2),
                                    padding='SAME')

        def conv(cin, cout):
            # 5x5, stride-2, SAME-padded convolution.
            return Conv2D(input_channels=cin,
                          num_filters=cout,
                          kernel_size=(5, 5),
                          stride=(2, 2),
                          padding='SAME')

        # Spatial sizes after 1, 2, 3 and 4 stride-2 SAME 5x5 stages.
        out_shape2 = same_nd([h, w], kernel_size=(5, 5), stride=(2, 2))
        out_shape4 = same_nd(out_shape2, kernel_size=(5, 5), stride=(2, 2))
        out_shape8 = same_nd(out_shape4, kernel_size=(5, 5), stride=(2, 2))
        out_shape16 = same_nd(out_shape8, kernel_size=(5, 5), stride=(2, 2))
        h16, w16 = out_shape16

        with tf.variable_scope('Generator'):
            # Project the noise vector to a small feature map, then
            # upsample back to (h, w, c) through four deconv stages.
            self.g_layers = [
                Linear(z_dim, 8 * gf_dim * h16 * w16),
                Reshape([-1, h16, w16, 8 * gf_dim]),
                BatchNormalization(input_shape=[h16, w16, 8 * gf_dim]),
                RELU(),
                deconv(8 * gf_dim, 4 * gf_dim, out_shape8),
                BatchNormalization(input_shape=out_shape8 + [4 * gf_dim]),
                RELU(),
                deconv(4 * gf_dim, 2 * gf_dim, out_shape4),
                BatchNormalization(input_shape=out_shape4 + [2 * gf_dim]),
                RELU(),
                deconv(2 * gf_dim, gf_dim, out_shape2),
                BatchNormalization(input_shape=out_shape2 + [gf_dim]),
                RELU(),
                deconv(gf_dim, c, (h, w)),
            ]

        with tf.variable_scope('Discriminator'):
            # The discriminator is split into five segments so callers
            # can tap intermediate feature maps between them.
            self.d1_layers = [
                conv(c, df_dim),
                LeakyRELU(),
                conv(df_dim, 2 * df_dim),
            ]
            self.d2_layers = [
                BatchNormalization(input_shape=out_shape4 + [2 * df_dim]),
                LeakyRELU(),
                conv(2 * df_dim, 4 * df_dim),
            ]
            self.d3_layers = [
                BatchNormalization(input_shape=out_shape8 + [4 * df_dim]),
                LeakyRELU(),
                conv(4 * df_dim, 8 * df_dim),
            ]
            self.d4_layers = [
                BatchNormalization(input_shape=out_shape16 + [8 * df_dim]),
                LeakyRELU(),
                # Global max pool over the spatial axes.
                ReduceMax(reduction_indices=[1, 2]),
            ]
            self.d5_layers = [
                Flatten(),
                Linear(8 * df_dim, 1),
            ]
            print('====:', 8 * df_dim)
Exemple #19
0
def model3D_2(img=(84, 256, 256)):
    """Build a small 3D conv/deconv segmentation model with 2 output classes.

    Args:
        img: (depth, height, width) of the input volume; the final
            transposed convolution upsamples back to this shape.

    Returns:
        A tensorgraph Sequential model ending in a Softmax over 2 channels.
    """
    with tf.name_scope('WMH_2Chan_Input'):
        unit_stride = (1, 1, 1)
        down_stride = (2, 2, 2)
        # Spatial size after the single stride-2 pooling stage.
        half_size = updateConvLayerSize(img, down_stride)

        layers = [
            Conv3D(input_channels=1,
                   num_filters=8,
                   kernel_size=(5, 5, 5),
                   stride=unit_stride,
                   padding='SAME'),
            TFBatchNormalization(name='b1'),
            MaxPool3D(poolsize=(2, 2, 2), stride=down_stride,
                      padding='SAME'),
            RELU(),
            Conv3D(input_channels=8,
                   num_filters=16,
                   kernel_size=(3, 3, 3),
                   stride=unit_stride,
                   padding='SAME'),
            TFBatchNormalization(name='b2'),
            Conv3D_Tranpose1(input_channels=16,
                             num_filters=8,
                             output_shape=half_size,
                             kernel_size=(3, 3, 3),
                             stride=unit_stride,
                             padding='SAME'),
            TFBatchNormalization(name='b4'),
            RELU(),
            Conv3D_Tranpose1(input_channels=8,
                             num_filters=2,
                             output_shape=img,
                             kernel_size=(3, 3, 3),
                             stride=down_stride,
                             padding='SAME'),
            Softmax(),
        ]

        net = tg.Sequential()
        for layer in layers:
            net.add(layer)
    return net
Exemple #20
0
def CNN_Classifier(X_train, y_train, X_valid, y_valid, restore):
    """Train a 10-layer CNN classifier, optionally restoring a checkpoint.

    Args:
        X_train: training images, numpy array of shape (N, h, w, c).
        y_train: one-hot training labels, numpy array of shape (N, nclass).
        X_valid: validation images, same layout as X_train.
        y_valid: one-hot validation labels, same layout as y_train.
        restore: if equal to 1, restore weights from ./var/5/model.ckpt
            before training.

    Side effects:
        Saves a checkpoint under ./var/5 and prints the best validation
        accuracy observed over the run.
    """
    batchsize = 64
    learning_rate = 0.001
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape

    g = tf.Graph()
    with g.as_default():
        data_train = tg.SequentialIterator(X_train,
                                           y_train,
                                           batchsize=batchsize)
        data_valid = tg.SequentialIterator(X_valid,
                                           y_valid,
                                           batchsize=batchsize)

        X_ph = tf.placeholder('float32', [None, h, w, c])

        # One placeholder per label component (a single component here).
        y_phs = []
        for comp in [nclass]:
            y_phs.append(tf.placeholder('float32', [None, comp]))

        start = tg.StartNode(input_vars=[X_ph])

        # Track the spatial size through ten VALID 2x2 stride-1 convs and
        # one final stride-2 stage, to size the flatten layer below.
        for _ in range(10):
            h, w = valid(in_height=h, in_width=w, strides=(1, 1),
                         filters=(2, 2))
        h, w = valid(in_height=h, in_width=w, strides=(2, 2), filters=(2, 2))
        num = 32
        dim = int(h * w * num)

        # Ten identical Conv2D -> BatchNorm -> RELU stages; the first takes
        # the c input channels, and a single Dropout follows stage 8.
        conv_layers = []
        for i in range(1, 11):
            conv_layers.append(
                Conv2D(input_channels=c if i == 1 else num,
                       num_filters=num,
                       padding='VALID',
                       kernel_size=(2, 2),
                       stride=(1, 1)))
            conv_layers.append(TFBatchNormalization(name='layer{}'.format(i)))
            conv_layers.append(RELU())
            if i == 8:
                conv_layers.append(Dropout(dropout_below=0.5))
        conv_layers.append(MaxPooling(poolsize=(2, 2),
                                      stride=(2, 2),
                                      padding='VALID'))
        conv_layers.append(Reshape(shape=(-1, dim)))

        h1_Node = tg.HiddenNode(prev=[start], layers=conv_layers)

        h2_Node = tg.HiddenNode(
            prev=[h1_Node],
            layers=[Linear(prev_dim=dim, this_dim=nclass),
                    Softmax()])

        end_nodes = [tg.EndNode(prev=[h2_Node])]

        graph = Graph(start=[start], end=end_nodes)

        train_outs_sb = graph.train_fprop()
        test_outs = graph.test_fprop()

        # Mean-squared error against the one-hot targets, summed over
        # all label components.
        ttl_mse = []
        for y_ph, out in zip(y_phs, train_outs_sb):
            ttl_mse.append(tf.reduce_mean((y_ph - out)**2))

        mse = sum(ttl_mse)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(mse)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        saver = tf.train.Saver()
        vardir = './var/5'
        if not os.path.exists(vardir):
            os.makedirs(vardir)

        tf.set_random_seed(1)
        init = tf.global_variables_initializer()
        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:

            sess.run(init)
            if restore == 1:
                re_saver = tf.train.Saver()
                re_saver.restore(sess, vardir + "/model.ckpt")
                print("Model restored.")

            max_epoch = 100
            temp_acc = []
            for epoch in range(max_epoch):
                train_error = 0
                train_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_train:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch
                    sess.run(optimizer, feed_dict=feed_dict)
                    train_outs = sess.run(train_outs_sb, feed_dict=feed_dict)
                    train_error += total_mse(train_outs, [ys])[0]
                    train_accuracy += total_accuracy(train_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                valid_error = 0
                valid_accuracy = 0
                ttl_examples = 0
                for X_batch, ys in data_valid:
                    feed_dict = {X_ph: X_batch}
                    for y_ph, y_batch in zip(y_phs, [ys]):
                        feed_dict[y_ph] = y_batch

                    valid_outs = sess.run(test_outs, feed_dict=feed_dict)
                    valid_error += total_mse(valid_outs, [ys])[0]
                    valid_accuracy += total_accuracy(valid_outs, [ys])[0]
                    ttl_examples += len(X_batch)

                temp_acc.append(valid_accuracy / float(ttl_examples))
            save_path = saver.save(sess, vardir + "/model.ckpt")
            print("Model saved in file: %s" % save_path)
            # Fixed: this was a Python 2 print statement, which is a
            # SyntaxError under Python 3 (rest of the file uses print()).
            print('max accuracy is:\t', max(temp_acc))
Exemple #21
0
def model(nclass, h, w, c):
    """Build the All-CNN style CIFAR10 classifier.

    Args:
        nclass: number of output classes.
        h, w, c: accepted for interface compatibility; the architecture
            itself is fixed (input is assumed to pool down to 8x8).

    Returns:
        A tensorgraph Sequential model ending in a class Softmax.
    """
    with tf.name_scope('Cifar10AllCNN'):
        seq = tg.Sequential()

        # (num_filters, kernel, stride, regulariser-after-RELU) per stage.
        stages = [
            (96, (3, 3), (1, 1), 'bn'),
            (96, (3, 3), (1, 1), 'drop'),
            (96, (3, 3), (2, 2), 'bn'),
            (192, (3, 3), (1, 1), 'drop'),
            (192, (3, 3), (1, 1), 'bn'),
            (192, (3, 3), (2, 2), 'drop'),
            (192, (3, 3), (1, 1), 'bn'),
            (192, (1, 1), (1, 1), 'drop'),
            (nclass, (1, 1), (1, 1), 'bn'),
        ]
        for filters, kernel, stride, post in stages:
            seq.add(Conv2D(num_filters=filters,
                           kernel_size=kernel,
                           stride=stride,
                           padding='SAME'))
            seq.add(RELU())
            seq.add(BatchNormalization() if post == 'bn' else Dropout(0.5))

        # Global average pool over the remaining 8x8 map, then classify.
        seq.add(AvgPooling(poolsize=(8, 8), stride=(1, 1), padding='VALID'))
        seq.add(Flatten())
        seq.add(Softmax())
    return seq
Exemple #22
0
    def generator(self):
        """Build the generator graph: an encoder compresses a real image to a
        bottleneck code, the code is summed with an input noise vector, and a
        decoder of transposed convolutions maps the result back to an image.

        Returns:
            tuple: (self.y_ph, self.noise_ph, G_train_sb, G_test_sb,
            gen_var_list) — the label and noise placeholders, the train/test
            output tensors, and the generator's variable collection.
        """
        # Flag so other builders can check the generator graph exists.
        self.generator_called = True
        with self.tf_graph.as_default():
            scope = 'Generator'
            with tf.name_scope(scope):
                # Spatial sizes after each VALID 5x5 conv of the encoder;
                # flat_dim is the flattened size of the last 32-channel map.
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                print('h1:{}, w1:{}'.format(h1, w1))
                print('h2:{}, w2:{}'.format(h2, w2))
                print('h3:{}, w3:{}'.format(h3, w3))
                print('flat dim:{}'.format(flat_dim))

                self.gen_real_sn = tg.StartNode(input_vars=[self.real_ph])

                # Encoder: real image -> bottleneck code squashed to [-1, 1].
                enc_hn = tg.HiddenNode(
                    prev=[self.gen_real_sn],
                    layers=[
                        Conv2D(input_channels=self.c,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(1, 1),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc1'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc2'),
                        RELU(),
                        Conv2D(input_channels=32,
                               num_filters=32,
                               kernel_size=(5, 5),
                               stride=(2, 2),
                               padding='VALID'),
                        TFBatchNormalization(name=scope + '/genc3'),
                        RELU(),
                        #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        #    RELU(),
                        Flatten(),
                        Linear(flat_dim, 300),
                        TFBatchNormalization(name=scope + '/genc4'),
                        RELU(),
                        Linear(300, self.bottleneck_dim),
                        Tanh(),
                    ])

                self.noise_sn = tg.StartNode(input_vars=[self.noise_ph])

                # Decoder: noise + code (element-wise Sum merge) -> image
                # in [0, 1] via Sigmoid.
                self.gen_hn = tg.HiddenNode(
                    prev=[self.noise_sn, enc_hn],
                    input_merge_mode=Sum(),
                    layers=[
                        Linear(self.bottleneck_dim, flat_dim),
                        RELU(),

                        ######[ Method 0 ]######
                        #    Reshape((-1, h3, w3, 32)),
                        #    Conv2D_Transpose(input_channels=32, num_filters=100, output_shape=(h2,w2),
                        #                     kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        ######[ End Method 0 ]######

                        ######[ Method 1 ]######
                        # Treat the flat vector as a 1x1 feature map and
                        # deconvolve it straight up to (h3, w3).
                        Reshape((-1, 1, 1, flat_dim)),
                        #    Reshape((-1, h))
                        Conv2D_Transpose(input_channels=flat_dim,
                                         num_filters=200,
                                         output_shape=(h3, w3),
                                         kernel_size=(h3, w3),
                                         stride=(1, 1),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=200, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/g1'),
                        RELU(),
                        Conv2D_Transpose(input_channels=200,
                                         num_filters=100,
                                         output_shape=(h2, w2),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=100, short_memory=0.01),
                        ######[ End Method 1 ]######
                        TFBatchNormalization(name=scope + '/g2'),
                        RELU(),
                        Conv2D_Transpose(input_channels=100,
                                         num_filters=50,
                                         output_shape=(h1, w1),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        #    BatchNormalization(layer_type='conv', dim=50, short_memory=0.01),
                        TFBatchNormalization(name=scope + '/g3'),
                        RELU(),
                        Conv2D_Transpose(input_channels=50,
                                         num_filters=self.c,
                                         output_shape=(self.h, self.w),
                                         kernel_size=(5, 5),
                                         stride=(1, 1),
                                         padding='VALID'),
                        SetShape((-1, self.h, self.w, self.c)),
                        Sigmoid()
                    ])

                # NOTE(review): h and w computed below are never used —
                # looks like leftover shape bookkeeping; confirm and remove.
                h, w = valid(self.h, self.w, kernel_size=(5, 5), stride=(1, 1))
                h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2))
                h, w = valid(h, w, kernel_size=(5, 5), stride=(2, 2))
                h, w = valid(h, w, kernel_size=(h3, w3), stride=(1, 1))

                y_en = tg.EndNode(prev=[self.gen_hn])

                graph = tg.Graph(start=[self.noise_sn, self.gen_real_sn],
                                 end=[y_en])

                G_train_sb = graph.train_fprop()[0]
                G_test_sb = graph.test_fprop()[0]
                gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return self.y_ph, self.noise_ph, G_train_sb, G_test_sb, gen_var_list
Exemple #23
0
def Residual_UNET(input, img=(84, 256, 256)):
    """Build a residual U-Net style 3D segmentation graph.

    Args:
        input: input tensor variable (batch of 1-channel volumes).
        img: (depth, height, width) of the input volume.

    Returns:
        A tensorgraph Graph whose single end node emits a 2-channel softmax.
    """
    with tf.name_scope('WMH'):
        unit_stride = (1, 1, 1)
        down_stride = (2, 2, 2)
        k3 = (3, 3, 3)

        start = StartNode(input_vars=[input])

        # A single pooling layer instance is deliberately shared by both
        # down-sampling paths (matches the original wiring).
        shared_pool = MaxPool3D(poolsize=(2, 2, 2),
                                stride=down_stride,
                                padding='SAME')
        # Spatial size after one stride-2 pooling stage.
        half_size = updateConvLayerSize(img, down_stride)

        # Stem: lift 1 channel to 8.
        stem_layers = [
            Conv3D(input_channels=1,
                   num_filters=8,
                   kernel_size=k3,
                   stride=unit_stride,
                   padding='SAME')
        ]

        # Encoder stage: pool, lift to 16 channels, residual block.
        enc16_layers = [
            shared_pool,
            Conv3D(input_channels=8,
                   num_filters=16,
                   kernel_size=k3,
                   stride=unit_stride,
                   padding='SAME'),
            ResidualBlock3D(16, 'L02'),
        ]

        # Bottleneck: pool, lift to 32 channels, residual block, then
        # upsample back to half resolution with 16 channels.
        bottleneck_layers = [
            shared_pool,
            Conv3D(input_channels=16,
                   num_filters=32,
                   kernel_size=k3,
                   stride=unit_stride,
                   padding='SAME'),
            ResidualBlock3D(32, 'L03'),
            Conv3D_Tranpose1(input_channels=32,
                             num_filters=16,
                             output_shape=half_size,
                             kernel_size=k3,
                             stride=down_stride,
                             padding='SAME'),
        ]

        conv8 = HiddenNode(prev=[start], layers=stem_layers)
        resBlock16 = HiddenNode(prev=[conv8], layers=enc16_layers)
        resBlock32_16 = HiddenNode(prev=[resBlock16], layers=bottleneck_layers)
        # Long skip connection: sum bottleneck output with encoder features.
        residualLong16 = HiddenNode(prev=[resBlock32_16, resBlock16],
                                    input_merge_mode=Sum())

        # Decoder stage: residual block, upsample to full resolution.
        dec8_layers = [
            ResidualBlock3D(16, 'L04'),
            Conv3D_Tranpose1(input_channels=16,
                             num_filters=8,
                             output_shape=img,
                             kernel_size=k3,
                             stride=down_stride,
                             padding='SAME'),
            RELU(),
        ]
        resBlock16_8 = HiddenNode(prev=[residualLong16], layers=dec8_layers)
        # Second long skip: sum with the stem features.
        residualLong8 = HiddenNode(prev=[resBlock16_8, conv8],
                                   input_merge_mode=Sum())

        # Head: residual block, project to 2 classes, softmax.
        head_layers = [
            ResidualBlock3D(8, 'L05'),
            Conv3D(input_channels=8,
                   num_filters=2,
                   kernel_size=k3,
                   stride=unit_stride,
                   padding='SAME'),
            Softmax(),
        ]
        resBlock8_2 = HiddenNode(prev=[residualLong8], layers=head_layers)

        endNode = EndNode(prev=[resBlock8_2], input_merge_mode=NoChange())

        graph = Graph(start=[start], end=[endNode])

    return graph
Exemple #24
0
def train():
    """Train a small 3D-conv mask predictor on random volumes and report
    validation MSE / F1 each epoch.

    Data layout is batch x depth x height x width x channel; inputs and
    targets here are synthetic (uniform random) stand-ins.
    """
    batch_size = 64
    lr = 0.001
    n_epochs = 100

    # synthetic volumes: batch x depth x height x width x channel
    X_train = np.random.rand(1000, 20, 32, 32, 1)
    M_train = np.random.rand(1000, 20, 32, 32, 1)
    X_valid = np.random.rand(1000, 20, 32, 32, 1)
    M_valid = np.random.rand(1000, 20, 32, 32, 1)

    X_ph = tf.placeholder('float32', [None, 20, 32, 32, 1])
    M_ph = tf.placeholder('float32', [None, 20, 32, 32, 1])

    h, w = 32, 32

    # two SAME-padded 3D convs squeezed back to one channel, then a sigmoid
    # so the output reads as a per-voxel mask probability
    model = tg.Sequential()
    # iter_model = tg.Sequential()
    model.add(Conv3D(input_channels=1, num_filters=8,
                     kernel_size=(5, 5, 5), stride=(1, 1, 1), padding='SAME'))
    model.add(RELU())
    model.add(Conv3D(input_channels=8, num_filters=1,
                     kernel_size=(5, 5, 5), stride=(1, 1, 1), padding='SAME'))
    # iter_model.add(RELU())
    # model.add(Iterative(sequential=iter_model, num_iter=1))
    model.add(Sigmoid())

    M_train_s = model.train_fprop(X_ph)
    M_valid_s = model.test_fprop(X_ph)

    train_mse = tf.reduce_mean((M_ph - M_train_s) ** 2)
    valid_mse = tf.reduce_mean((M_ph - M_valid_s) ** 2)
    # train_mse = entropy(M_ph, M_train_s)
    # valid_mse = entropy(M_ph, M_valid_s)
    # threshold predictions at 0.5 before scoring F1
    valid_f1 = image_f1(tf.to_int32(M_ph), tf.to_int32(M_valid_s >= 0.5))

    data_train = tg.SequentialIterator(X_train, M_train, batchsize=batch_size)
    data_valid = tg.SequentialIterator(X_valid, M_valid, batchsize=batch_size)

    optimizer = tf.train.AdamOptimizer(lr).minimize(train_mse)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(n_epochs):
            print('epoch:', epoch)
            print('..training')
            pbar = ProgressBar(len(data_train))
            seen = 0
            for X_batch, M_batch in data_train:
                pbar.update(seen)
                sess.run(optimizer, feed_dict={X_ph: X_batch, M_ph: M_batch})
                seen += len(X_batch)

            print('..validating')
            valid_f1_score, valid_mse_score = sess.run(
                [valid_f1, valid_mse],
                feed_dict={X_ph: X_valid, M_ph: M_valid})
            print('valid mse score:', valid_mse_score)
            print('valid f1 score:', valid_f1_score)
Exemple #25
0
    def generator(self):
        """Build the conditional generator graph: (noise, label) -> image.

        Returns a 5-tuple ``(y_ph, noise_ph, G_train_sb, G_test_sb,
        gen_var_list)``: the label and noise placeholders, the train/test
        output symbols, and the variables collected under the 'Generator'
        scope.
        """
        # flag checked elsewhere to know the generator graph exists
        # (presumably by the discriminator/trainer — confirm against callers)
        self.generator_called = True
        with self.tf_graph.as_default():
            scope = 'Generator'
            with tf.name_scope(scope):
                # X_ph = tf.placeholder('float32', [None, self.h, self.w, 1], name='X')
                # X_sn = tg.StartNode(input_vars=[X_ph])
                noise_ph = tf.placeholder('float32',
                                          [None, self.bottleneck_dim],
                                          name='noise')
                self.noise_sn = tg.StartNode(input_vars=[noise_ph])

                # Mirror a VALID-padded conv encoder's output sizes so the
                # transposed-conv stack below can invert them back to (h, w).
                h1, w1 = valid(self.h,
                               self.w,
                               kernel_size=(5, 5),
                               stride=(1, 1))
                h2, w2 = valid(h1, w1, kernel_size=(5, 5), stride=(2, 2))
                h3, w3 = valid(h2, w2, kernel_size=(5, 5), stride=(2, 2))
                flat_dim = int(h3 * w3 * 32)
                print('h1:{}, w1:{}'.format(h1, w1))
                print('h2:{}, w2:{}'.format(h2, w2))
                print('h3:{}, w3:{}'.format(h3, w3))
                print('flat dim:{}'.format(flat_dim))

                # enc_hn = tg.HiddenNode(prev=[X_sn],
                #                        layers=[Conv2D(input_channels=1, num_filters=32, kernel_size=(5,5), stride=(1,1), padding='VALID'),
                #                                RELU(),
                #                                Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                                RELU(),
                #                                Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                                RELU(),
                #                             #    Conv2D(input_channels=32, num_filters=32, kernel_size=(5,5), stride=(2,2), padding='VALID'),
                #                             #    RELU(),
                #                                Flatten(),
                #                                Linear(flat_dim, 300),
                #                                RELU(),
                #                                # seq.add(Dropout(0.5))
                #                                Linear(300, self.bottleneck_dim),
                #                                Tanh(),
                #                                ])

                # class-conditioning input (presumably one-hot over
                # self.nclass classes — confirm against callers)
                y_ph = tf.placeholder('float32', [None, self.nclass], name='y')
                self.y_sn = tg.StartNode(input_vars=[y_ph])

                # condition the generator by concatenating noise and label
                # along the feature axis (axis 1)
                noise_hn = tg.HiddenNode(prev=[self.noise_sn, self.y_sn],
                                         input_merge_mode=Concat(1))

                # decoder: linear projection, then transposed convolutions
                # growing 1x1 -> 2x2 -> (h2,w2) -> (h1,w1) -> (h,w)
                self.gen_hn = tg.HiddenNode(
                    prev=[noise_hn],
                    layers=[
                        # NOTE(review): the Linear input dim hard-codes 10 for
                        # the label part; assumes self.nclass == 10 — confirm
                        Linear(self.bottleneck_dim + 10, flat_dim),
                        RELU(),

                        ######[ Method 0 ]######
                        #    Reshape((-1, h3, w3, 32)),
                        #    Conv2D_Transpose(input_channels=32, num_filters=100, output_shape=(h2,w2),
                        #                     kernel_size=(5,5), stride=(2,2), padding='VALID'),
                        ######[ End Method 0 ]######

                        ######[ Method 1 ]######
                        # treat the flat vector as a 1x1 feature map and
                        # upsample from there
                        Reshape((-1, 1, 1, flat_dim)),
                        Conv2D_Transpose(input_channels=flat_dim,
                                         num_filters=200,
                                         output_shape=(2, 2),
                                         kernel_size=(2, 2),
                                         stride=(1, 1),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/dc1'),
                        RELU(),
                        Conv2D_Transpose(input_channels=200,
                                         num_filters=100,
                                         output_shape=(h2, w2),
                                         kernel_size=(9, 9),
                                         stride=(1, 1),
                                         padding='VALID'),
                        ######[ End Method 1 ]######
                        TFBatchNormalization(name=scope + '/dc2'),
                        RELU(),
                        Conv2D_Transpose(input_channels=100,
                                         num_filters=50,
                                         output_shape=(h1, w1),
                                         kernel_size=(5, 5),
                                         stride=(2, 2),
                                         padding='VALID'),
                        TFBatchNormalization(name=scope + '/dc3'),
                        RELU(),
                        Conv2D_Transpose(input_channels=50,
                                         num_filters=1,
                                         output_shape=(self.h, self.w),
                                         kernel_size=(5, 5),
                                         stride=(1, 1),
                                         padding='VALID'),
                        SetShape((-1, self.h, self.w, 1)),
                        # sigmoid output: pixels in [0, 1]
                        Sigmoid()
                    ])

                y_en = tg.EndNode(prev=[self.gen_hn])
                graph = tg.Graph(start=[self.noise_sn, self.y_sn], end=[y_en])

                G_train_sb = graph.train_fprop()[0]
                G_test_sb = graph.test_fprop()[0]
                # import pdb; pdb.set_trace()
                # NOTE(review): tf.name_scope does not prefix variables made
                # via tf.get_variable, so this scope filter may miss (or
                # over-collect) variables — verify gen_var_list is correct.
                gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                 scope=scope)

        return y_ph, noise_ph, G_train_sb, G_test_sb, gen_var_list
Exemple #26
0
def _classifier_branch_layers(h1, w1, h3, w3):
    """Build one conv feature-extraction branch: (conv-BN-ReLU-pool) x2 -> flatten.

    A fresh list of layer instances is returned on every call, so each
    branch created from it owns its own weights (the original code built
    the two identical stacks inline and they were NOT weight-shared —
    this preserves that).
    """
    return [
        Conv2D(input_channels=1,
               num_filters=32,
               kernel_size=(3, 3),
               stride=(1, 1),
               padding='SAME'),
        BatchNormalization(input_shape=[h1, w1, 32]),
        RELU(),
        MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
        LRN(),
        Conv2D(input_channels=32,
               num_filters=64,
               kernel_size=(3, 3),
               stride=(1, 1),
               padding='SAME'),
        BatchNormalization(input_shape=[h3, w3, 64]),
        RELU(),
        MaxPooling(poolsize=(2, 2), stride=(2, 2), padding='SAME'),
        Flatten(),
    ]


def classifier(X_ph, X_gen_ph, h, w):
    """Two-branch (real vs generated image) binary classifier.

    Each input goes through its own conv branch; the flattened features are
    concatenated and fed to a small MLP ending in a sigmoid.

    Args:
        X_ph: placeholder for real images (single channel, h x w).
        X_gen_ph: placeholder for generated images (same shape).
        h, w: input spatial dimensions.

    Returns:
        (y_train, y_test): train-mode and test-mode output symbols.
    """
    with tf.variable_scope('Classifier'):
        X_sn = tg.StartNode(input_vars=[X_ph])
        X_gen_sn = tg.StartNode(input_vars=[X_gen_ph])
        # Track the spatial size through conv (stride 1) and pool (stride 2)
        # stages so the BatchNormalization shapes and the final Linear
        # fan-in match the actual feature-map sizes.
        h1, w1 = same(in_height=h,
                      in_width=w,
                      stride=(1, 1),
                      kernel_size=(3, 3))
        h2, w2 = same(in_height=h1,
                      in_width=w1,
                      stride=(2, 2),
                      kernel_size=(2, 2))
        h3, w3 = same(in_height=h2,
                      in_width=w2,
                      stride=(1, 1),
                      kernel_size=(3, 3))
        h4, w4 = same(in_height=h3,
                      in_width=w3,
                      stride=(2, 2),
                      kernel_size=(2, 2))

        print('---', h, w)
        # Two structurally identical but independently weighted branches;
        # the helper returns fresh layer instances each call.
        X_hn = tg.HiddenNode(prev=[X_sn],
                             layers=_classifier_branch_layers(h1, w1, h3, w3))
        X_gen_hn = tg.HiddenNode(prev=[X_gen_sn],
                                 layers=_classifier_branch_layers(h1, w1, h3, w3))

        print('===', h4 * w4 * 64 * 2)

        # Concatenate both branches' features, then MLP -> sigmoid score.
        merge_hn = tg.HiddenNode(prev=[X_hn, X_gen_hn],
                                 input_merge_mode=Concat(),
                                 layers=[
                                     Linear(h4 * w4 * 64 * 2, 100),
                                     RELU(),
                                     BatchNormalization(input_shape=[100]),
                                     Linear(100, 1),
                                     Sigmoid()
                                 ])

        en = tg.EndNode(prev=[merge_hn])

        graph = tg.Graph(start=[X_sn, X_gen_sn], end=[en])
        y_train, = graph.train_fprop()
        y_test, = graph.test_fprop()
    return y_train, y_test
Exemple #27
0
def train():
    """Train a 2D conv encoder/decoder with an iterative refinement tail on
    random data, printing validation MSE each epoch.
    """
    batch_size = 64
    lr = 0.001
    n_epochs = 10

    # synthetic data: RGB inputs, single-channel targets
    X_train = np.random.rand(1000, 32, 32, 3)
    M_train = np.random.rand(1000, 32, 32, 1)
    X_valid = np.random.rand(1000, 32, 32, 3)
    M_valid = np.random.rand(1000, 32, 32, 1)

    X_ph = tf.placeholder('float32', [None, 32, 32, 3])
    M_ph = tf.placeholder('float32', [None, 32, 32, 1])

    h, w = 32, 32

    # encoder: two stride-2 convs; decoder: two stride-2 transposed convs
    # restoring the original spatial size
    model = tg.Sequential()
    model.add(Conv2D(input_channels=3, num_filters=8,
                     kernel_size=(5, 5), stride=(2, 2), padding='SAME'))
    h1, w1 = same(h, w, kernel_size=(5, 5), stride=(2, 2))
    model.add(RELU())
    model.add(Conv2D(input_channels=8, num_filters=16,
                     kernel_size=(5, 5), stride=(2, 2), padding='SAME'))
    h2, w2 = same(h1, w1, kernel_size=(5, 5), stride=(2, 2))
    model.add(RELU())
    model.add(Conv2D_Transpose(input_channels=16, num_filters=8,
                               output_shape=(h1, w1), kernel_size=(5, 5),
                               stride=(2, 2), padding='SAME'))
    model.add(RELU())
    model.add(Conv2D_Transpose(input_channels=8, num_filters=1,
                               output_shape=(h, w), kernel_size=(5, 5),
                               stride=(2, 2), padding='SAME'))
    model.add(RELU())

    # refinement tail: the same small conv/deconv pair applied 10 times
    iter_model = tg.Sequential()
    iter_model.add(Conv2D(input_channels=1, num_filters=8,
                          kernel_size=(5, 5), stride=(2, 2), padding='SAME'))
    iter_model.add(RELU())
    iter_model.add(Conv2D_Transpose(input_channels=8, num_filters=1,
                                    output_shape=(h, w), kernel_size=(5, 5),
                                    stride=(2, 2), padding='SAME'))
    model.add(Iterative(sequential=iter_model, num_iter=10))

    M_train_s = model.train_fprop(X_ph)
    M_valid_s = model.test_fprop(X_ph)

    train_mse = tf.reduce_mean((M_ph - M_train_s) ** 2)
    valid_mse = tf.reduce_mean((M_ph - M_valid_s) ** 2)

    data_train = tg.SequentialIterator(X_train, M_train, batchsize=batch_size)
    data_valid = tg.SequentialIterator(X_valid, M_valid, batchsize=batch_size)

    optimizer = tf.train.AdamOptimizer(lr).minimize(train_mse)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(n_epochs):
            print('epoch:', epoch)
            print('..training')
            for X_batch, M_batch in data_train:
                sess.run(optimizer, feed_dict={X_ph: X_batch, M_ph: M_batch})

            print('..validating')
            valid_mse_score = sess.run(valid_mse,
                                       feed_dict={X_ph: X_valid, M_ph: M_valid})
            print('valid mse score:', valid_mse_score)