def identity_block(self, input, kernel_size, filters):
  filters1, filters2, filters3 = filters
  # 1x1 bottleneck convolution
  output = Conv2D(num_outputs=filters1, kernel_size=1, activation='linear', padding='same', in_layers=[input])
  output = BatchNorm(in_layers=[output])
  output = ReLU(output)
  # kernel_size x kernel_size convolution, chained from the previous output
  # (not from the block input)
  output = Conv2D(num_outputs=filters2, kernel_size=kernel_size, activation='linear', padding='same', in_layers=[output])
  output = BatchNorm(in_layers=[output])
  output = ReLU(output)
  # 1x1 convolution restoring the channel count
  output = Conv2D(num_outputs=filters3, kernel_size=1, activation='linear', padding='same', in_layers=[output])
  output = BatchNorm(in_layers=[output])
  # residual connection: add the unmodified block input
  output = Add(in_layers=[output, input])
  output = ReLU(output)
  return output
def test_mnist(self):
  from tensorflow.examples.tutorials.mnist import input_data
  mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
  train = dc.data.NumpyDataset(mnist.train.images, mnist.train.labels)
  valid = dc.data.NumpyDataset(mnist.validation.images, mnist.validation.labels)

  # Images are square 28x28 (batch, height, width, channel)
  feature = Feature(shape=(None, 784), name="Feature")
  make_image = Reshape(shape=(-1, 28, 28, 1), in_layers=[feature])
  conv2d_1 = Conv2D(num_outputs=32, normalizer_fn=tf.contrib.layers.batch_norm, in_layers=[make_image])
  maxpool_1 = MaxPool(in_layers=[conv2d_1])
  conv2d_2 = Conv2D(num_outputs=64, normalizer_fn=tf.contrib.layers.batch_norm, in_layers=[maxpool_1])
  maxpool_2 = MaxPool(in_layers=[conv2d_2])
  flatten = Flatten(in_layers=[maxpool_2])
  dense1 = Dense(out_channels=1024, activation_fn=tf.nn.relu, in_layers=[flatten])
  dense2 = Dense(out_channels=10, in_layers=[dense1])
  label = Label(shape=(None, 10), name="Label")
  smce = SoftMaxCrossEntropy(in_layers=[label, dense2])
  loss = ReduceMean(in_layers=[smce])
  output = SoftMax(in_layers=[dense2])

  tg = dc.models.TensorGraph(model_dir='/tmp/mnist', batch_size=1000, use_queue=True)
  tg.add_output(output)
  tg.set_loss(loss)
  tg.fit(train, nb_epoch=2)

  prediction = np.squeeze(tg.predict_proba_on_batch(valid.X))
  fpr = dict()
  tpr = dict()
  roc_auc = dict()
  for i in range(10):
    fpr[i], tpr[i], thresh = roc_curve(valid.y[:, i], prediction[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
    assert_true(roc_auc[i] > 0.99)
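# The `tensorflow.examples.tutorials.mnist` module used above is deprecated in
# later TF 1.x releases. Below is a minimal sketch of an equivalent loader
# built on tf.keras.datasets; it substitutes the Keras test split for the
# tutorials module's validation split, and `load_mnist_datasets` is an
# illustrative name, not part of the original code.
def load_mnist_datasets():
  import numpy as np
  import tensorflow as tf
  import deepchem as dc
  (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
  # Flatten to (n_samples, 784) floats in [0, 1] and one-hot encode labels,
  # matching the layout of input_data.read_data_sets(..., one_hot=True)
  x_train = x_train.reshape(-1, 784).astype(np.float32) / 255.0
  x_test = x_test.reshape(-1, 784).astype(np.float32) / 255.0
  y_train = np.eye(10)[y_train]
  y_test = np.eye(10)[y_test]
  train = dc.data.NumpyDataset(x_train, y_train)
  valid = dc.data.NumpyDataset(x_test, y_test)
  return train, valid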
def test_Conv2D_pickle():
  tg = TensorGraph()
  feature = Feature(shape=(tg.batch_size, 10, 10, 1))
  layer = Conv2D(num_outputs=3, in_layers=feature)
  tg.add_output(layer)
  tg.set_loss(layer)
  tg.build()
  tg.save()
def __init__(self, img_rows=224, img_cols=224, weights="imagenet", classes=1000, **kwargs):
  super(ResNet50, self).__init__(use_queue=False, **kwargs)
  self.img_cols = img_cols
  self.img_rows = img_rows
  self.weights = weights
  self.classes = classes

  input = Feature(shape=(None, self.img_rows, self.img_cols, 3))
  labels = Label(shape=(None, self.classes))

  # Stem: 7x7 stride-2 convolution, batch norm, ReLU, max pooling
  conv1 = Conv2D(num_outputs=64, kernel_size=7, stride=2, activation='linear', padding='same', in_layers=[input])
  bn1 = BatchNorm(in_layers=[conv1])
  ac1 = ReLU(bn1)
  # pool the activated tensor, not the pre-activation batch norm output
  pool1 = MaxPool2D(ksize=[1, 3, 3, 1], in_layers=[ac1])

  # Stage 2: one convolutional block, two identity blocks
  cb1 = self.conv_block(pool1, 3, [64, 64, 256], 1)
  id1 = self.identity_block(cb1, 3, [64, 64, 256])
  id1 = self.identity_block(id1, 3, [64, 64, 256])

  # Stage 3: one convolutional block, three identity blocks
  cb2 = self.conv_block(id1, 3, [128, 128, 512])
  id2 = self.identity_block(cb2, 3, [128, 128, 512])
  id2 = self.identity_block(id2, 3, [128, 128, 512])
  id2 = self.identity_block(id2, 3, [128, 128, 512])

  # Stage 4: one convolutional block, five identity blocks, each chained
  # from the previous block's output
  cb3 = self.conv_block(id2, 3, [256, 256, 1024])
  id3 = self.identity_block(cb3, 3, [256, 256, 1024])
  id3 = self.identity_block(id3, 3, [256, 256, 1024])
  id3 = self.identity_block(id3, 3, [256, 256, 1024])
  id3 = self.identity_block(id3, 3, [256, 256, 1024])
  id3 = self.identity_block(id3, 3, [256, 256, 1024])

  # Stage 5: one convolutional block, two identity blocks
  cb4 = self.conv_block(id3, 3, [512, 512, 2048])
  id4 = self.identity_block(cb4, 3, [512, 512, 2048])
  id4 = self.identity_block(id4, 3, [512, 512, 2048])

  pool2 = AvgPool2D(ksize=[1, 7, 7, 1], in_layers=[id4])
  flatten = Flatten(in_layers=[pool2])
  dense = Dense(classes, in_layers=[flatten])

  loss = SoftMaxCrossEntropy(in_layers=[labels, dense])
  loss = ReduceMean(in_layers=[loss])
  self.set_loss(loss)
  self.add_output(dense)
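# A minimal usage sketch for the ResNet50 class defined above. The random
# images, one-hot labels, and batch size are placeholders for illustration,
# not part of the original code.
import numpy as np
import deepchem as dc

images = np.random.rand(8, 224, 224, 3)
labels = np.eye(1000)[np.random.randint(0, 1000, size=8)]
dataset = dc.data.NumpyDataset(images, labels)

model = ResNet50(img_rows=224, img_cols=224, classes=1000, batch_size=8)
model.fit(dataset, nb_epoch=1)
predictions = model.predict(dataset)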
def conv_block(self, input, kernel_size, filters, strides=2):
  filters1, filters2, filters3 = filters
  # 1x1 bottleneck convolution with the stage's downsampling stride
  output = Conv2D(num_outputs=filters1, kernel_size=1, stride=strides, activation='linear', padding='same', in_layers=[input])
  output = BatchNorm(in_layers=[output])
  output = ReLU(output)
  output = Conv2D(num_outputs=filters2, kernel_size=kernel_size, activation='linear', padding='same', in_layers=[output])
  output = BatchNorm(in_layers=[output])
  output = ReLU(output)
  output = Conv2D(num_outputs=filters3, kernel_size=1, activation='linear', padding='same', in_layers=[output])
  output = BatchNorm(in_layers=[output])
  # project the shortcut so its shape matches the main branch
  shortcut = Conv2D(num_outputs=filters3, kernel_size=1, stride=strides, activation='linear', padding='same', in_layers=[input])
  shortcut = BatchNorm(in_layers=[shortcut])
  output = Add(in_layers=[shortcut, output])
  output = ReLU(output)
  return output
def test_conv_2D(self):
  """Test that Conv2D can be invoked."""
  length = 4
  width = 5
  in_channels = 2
  out_channels = 3
  batch_size = 20
  in_tensor = np.random.rand(batch_size, length, width, in_channels)
  with self.test_session() as sess:
    in_tensor = tf.convert_to_tensor(in_tensor, dtype=tf.float32)
    out_tensor = Conv2D(out_channels, kernel_size=1)(in_tensor)
    sess.run(tf.global_variables_initializer())
    out_tensor = out_tensor.eval()
    assert out_tensor.shape == (batch_size, length, width, out_channels)
def __init__(self, img_rows=512, img_cols=512, filters=[64, 128, 256, 512, 1024], model=None, **kwargs):
  super(UNet, self).__init__(use_queue=False, **kwargs)
  self.img_cols = img_cols
  self.img_rows = img_rows
  self.filters = filters
  # Use the graph passed in rather than discarding it; a TensorGraph() default
  # argument would be constructed once and shared across instances
  self.model = model if model is not None else dc.models.TensorGraph()

  # Inputs are RGB images in NHWC layout, so Conv2D receives a 4-D tensor
  input = Feature(shape=(None, self.img_rows, self.img_cols, 3))

  # Contracting path
  conv1 = Conv2D(num_outputs=self.filters[0], kernel_size=3, activation='relu', padding='same', in_layers=[input])
  conv1 = Conv2D(num_outputs=self.filters[0], kernel_size=3, activation='relu', padding='same', in_layers=[conv1])
  pool1 = MaxPool2D(ksize=2, in_layers=[conv1])
  conv2 = Conv2D(num_outputs=self.filters[1], kernel_size=3, activation='relu', padding='same', in_layers=[pool1])
  conv2 = Conv2D(num_outputs=self.filters[1], kernel_size=3, activation='relu', padding='same', in_layers=[conv2])
  pool2 = MaxPool2D(ksize=2, in_layers=[conv2])
  conv3 = Conv2D(num_outputs=self.filters[2], kernel_size=3, activation='relu', padding='same', in_layers=[pool2])
  conv3 = Conv2D(num_outputs=self.filters[2], kernel_size=3, activation='relu', padding='same', in_layers=[conv3])
  pool3 = MaxPool2D(ksize=2, in_layers=[conv3])
  conv4 = Conv2D(num_outputs=self.filters[3], kernel_size=3, activation='relu', padding='same', in_layers=[pool3])
  conv4 = Conv2D(num_outputs=self.filters[3], kernel_size=3, activation='relu', padding='same', in_layers=[conv4])
  pool4 = MaxPool2D(ksize=2, in_layers=[conv4])
  conv5 = Conv2D(num_outputs=self.filters[4], kernel_size=3, activation='relu', padding='same', in_layers=[pool4])
  conv5 = Conv2D(num_outputs=self.filters[4], kernel_size=3, activation='relu', padding='same', in_layers=[conv5])

  # Expanding path: stride-2 transposed convolutions upsample back to each
  # skip connection's resolution; concatenate on the channel axis (axis 3
  # in NHWC), since concatenating on axis 1 would mix spatial dimensions
  up6 = Conv2DTranspose(num_outputs=self.filters[3], kernel_size=2, stride=2, in_layers=[conv5])
  concat6 = Concat(in_layers=[conv4, up6], axis=3)
  conv6 = Conv2D(num_outputs=self.filters[3], kernel_size=3, activation='relu', padding='same', in_layers=[concat6])
  conv6 = Conv2D(num_outputs=self.filters[3], kernel_size=3, activation='relu', padding='same', in_layers=[conv6])
  up7 = Conv2DTranspose(num_outputs=self.filters[2], kernel_size=2, stride=2, in_layers=[conv6])
  concat7 = Concat(in_layers=[conv3, up7], axis=3)
  conv7 = Conv2D(num_outputs=self.filters[2], kernel_size=3, activation='relu', padding='same', in_layers=[concat7])
  conv7 = Conv2D(num_outputs=self.filters[2], kernel_size=3, activation='relu', padding='same', in_layers=[conv7])
  up8 = Conv2DTranspose(num_outputs=self.filters[1], kernel_size=2, stride=2, in_layers=[conv7])
  concat8 = Concat(in_layers=[conv2, up8], axis=3)
  conv8 = Conv2D(num_outputs=self.filters[1], kernel_size=3, activation='relu', padding='same', in_layers=[concat8])
  conv8 = Conv2D(num_outputs=self.filters[1], kernel_size=3, activation='relu', padding='same', in_layers=[conv8])
  up9 = Conv2DTranspose(num_outputs=self.filters[0], kernel_size=2, stride=2, in_layers=[conv8])
  concat9 = Concat(in_layers=[conv1, up9], axis=3)
  conv9 = Conv2D(num_outputs=self.filters[0], kernel_size=3, activation='relu', padding='same', in_layers=[concat9])
  conv9 = Conv2D(num_outputs=self.filters[0], kernel_size=3, activation='relu', padding='same', in_layers=[conv9])

  # Final 1x1 convolution producing a per-pixel sigmoid mask
  conv10 = Conv2D(num_outputs=1, kernel_size=1, activation='sigmoid', in_layers=[conv9])
  self.model.add_output(conv10)
def build_graph(self):
  # inputs placeholder
  self.inputs = Feature(shape=(None, self.image_size, self.image_size, 3), dtype=tf.float32)
  # data preprocessing and augmentation
  in_layer = DRAugment(self.augment, self.batch_size, size=(self.image_size, self.image_size), in_layers=[self.inputs])
  # first conv layer
  in_layer = Conv2D(int(self.n_init_kernel), kernel_size=7, activation_fn=None, in_layers=[in_layer])
  in_layer = BatchNorm(in_layers=[in_layer])
  in_layer = ReLU(in_layers=[in_layer])
  # downsample by max pooling
  res_in = MaxPool2D(ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], in_layers=[in_layer])

  for ct_module in range(self.n_downsample - 1):
    # each module is a residual convolutional block
    # followed by a convolutional downsample layer
    in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)), kernel_size=1, activation_fn=None, in_layers=[res_in])
    in_layer = BatchNorm(in_layers=[in_layer])
    in_layer = ReLU(in_layers=[in_layer])
    in_layer = Conv2D(int(self.n_init_kernel * 2**(ct_module - 1)), kernel_size=3, activation_fn=None, in_layers=[in_layer])
    in_layer = BatchNorm(in_layers=[in_layer])
    in_layer = ReLU(in_layers=[in_layer])
    in_layer = Conv2D(int(self.n_init_kernel * 2**ct_module), kernel_size=1, activation_fn=None, in_layers=[in_layer])
    res_a = BatchNorm(in_layers=[in_layer])

    res_out = res_in + res_a
    res_in = Conv2D(int(self.n_init_kernel * 2**(ct_module + 1)), kernel_size=3, stride=2, in_layers=[res_out])
    res_in = BatchNorm(in_layers=[res_in])

  # max pooling over the final outcome
  in_layer = ReduceMax(axis=(1, 2), in_layers=[res_in])

  for layer_size in self.n_fully_connected:
    # fully connected layers
    in_layer = Dense(layer_size, activation_fn=tf.nn.relu, in_layers=[in_layer])
    # dropout for dense layers
    #in_layer = Dropout(0.25, in_layers=[in_layer])

  logit_pred = Dense(self.n_tasks * self.n_classes, activation_fn=None, in_layers=[in_layer])
  logit_pred = Reshape(shape=(None, self.n_tasks, self.n_classes), in_layers=[logit_pred])

  weights = Weights(shape=(None, self.n_tasks))
  labels = Label(shape=(None, self.n_tasks), dtype=tf.int32)

  output = SoftMax(logit_pred)
  self.add_output(output)

  loss = SparseSoftMaxCrossEntropy(in_layers=[labels, logit_pred])
  weighted_loss = WeightedError(in_layers=[loss, weights])
  # weight decay regularizer
  # weighted_loss = WeightDecay(0.1, 'l2', in_layers=[weighted_loss])
  self.set_loss(weighted_loss)
def __init__(self, img_rows=512, img_cols=512, filters=[64, 128, 256, 512, 1024], **kwargs):
  super(UNet, self).__init__(use_queue=False, **kwargs)
  self.img_cols = img_cols
  self.img_rows = img_rows
  self.filters = filters

  input = Feature(shape=(None, self.img_rows, self.img_cols, 3))
  labels = Label(shape=(None, self.img_rows * self.img_cols))

  # Contracting path
  conv1 = Conv2D(num_outputs=self.filters[0], kernel_size=3, activation='relu', padding='same', in_layers=[input])
  conv1 = Conv2D(num_outputs=self.filters[0], kernel_size=3, activation='relu', padding='same', in_layers=[conv1])
  pool1 = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[conv1])
  conv2 = Conv2D(num_outputs=self.filters[1], kernel_size=3, activation='relu', padding='same', in_layers=[pool1])
  conv2 = Conv2D(num_outputs=self.filters[1], kernel_size=3, activation='relu', padding='same', in_layers=[conv2])
  pool2 = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[conv2])
  conv3 = Conv2D(num_outputs=self.filters[2], kernel_size=3, activation='relu', padding='same', in_layers=[pool2])
  conv3 = Conv2D(num_outputs=self.filters[2], kernel_size=3, activation='relu', padding='same', in_layers=[conv3])
  pool3 = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[conv3])
  conv4 = Conv2D(num_outputs=self.filters[3], kernel_size=3, activation='relu', padding='same', in_layers=[pool3])
  conv4 = Conv2D(num_outputs=self.filters[3], kernel_size=3, activation='relu', padding='same', in_layers=[conv4])
  pool4 = MaxPool2D(ksize=[1, 2, 2, 1], in_layers=[conv4])
  conv5 = Conv2D(num_outputs=self.filters[4], kernel_size=3, activation='relu', padding='same', in_layers=[pool4])
  conv5 = Conv2D(num_outputs=self.filters[4], kernel_size=3, activation='relu', padding='same', in_layers=[conv5])

  # Expanding path: stride-2 transposed convolutions upsample back to each
  # skip connection's resolution; concatenate on the channel axis (NHWC)
  up6 = Conv2DTranspose(num_outputs=self.filters[3], kernel_size=2, stride=2, in_layers=[conv5])
  concat6 = Concat(in_layers=[conv4, up6], axis=3)
  conv6 = Conv2D(num_outputs=self.filters[3], kernel_size=3, activation='relu', padding='same', in_layers=[concat6])
  conv6 = Conv2D(num_outputs=self.filters[3], kernel_size=3, activation='relu', padding='same', in_layers=[conv6])
  up7 = Conv2DTranspose(num_outputs=self.filters[2], kernel_size=2, stride=2, in_layers=[conv6])
  concat7 = Concat(in_layers=[conv3, up7], axis=3)
  conv7 = Conv2D(num_outputs=self.filters[2], kernel_size=3, activation='relu', padding='same', in_layers=[concat7])
  conv7 = Conv2D(num_outputs=self.filters[2], kernel_size=3, activation='relu', padding='same', in_layers=[conv7])
  up8 = Conv2DTranspose(num_outputs=self.filters[1], kernel_size=2, stride=2, in_layers=[conv7])
  concat8 = Concat(in_layers=[conv2, up8], axis=3)
  conv8 = Conv2D(num_outputs=self.filters[1], kernel_size=3, activation='relu', padding='same', in_layers=[concat8])
  conv8 = Conv2D(num_outputs=self.filters[1], kernel_size=3, activation='relu', padding='same', in_layers=[conv8])
  up9 = Conv2DTranspose(num_outputs=self.filters[0], kernel_size=2, stride=2, in_layers=[conv8])
  concat9 = Concat(in_layers=[conv1, up9], axis=3)
  conv9 = Conv2D(num_outputs=self.filters[0], kernel_size=3, activation='relu', padding='same', in_layers=[concat9])
  conv9 = Conv2D(num_outputs=self.filters[0], kernel_size=3, activation='relu', padding='same', in_layers=[conv9])

  # Final 1x1 convolution producing a per-pixel sigmoid mask
  conv10 = Conv2D(num_outputs=1, kernel_size=1, activation='sigmoid', in_layers=[conv9])

  loss = SoftMaxCrossEntropy(in_layers=[labels, conv10])
  loss = ReduceMean(in_layers=[loss])
  self.set_loss(loss)
  self.add_output(conv10)
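# A minimal usage sketch for the UNet class above, assuming per-pixel binary
# masks flattened to (n_samples, img_rows * img_cols) to match the Label
# shape; the random arrays and batch size are placeholders, not part of the
# original code.
import numpy as np
import deepchem as dc

images = np.random.rand(4, 512, 512, 3)
masks = np.random.randint(0, 2, size=(4, 512 * 512)).astype(np.float32)
dataset = dc.data.NumpyDataset(images, masks)

unet = UNet(img_rows=512, img_cols=512, batch_size=4)
unet.fit(dataset, nb_epoch=1)
pred_masks = unet.predict(dataset)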