Example #1
def mdsr(num_layers=80, feature_size=64):
    input_tensor = Input(shape=(img_size, img_size, channel))

    # One convolution before res blocks and to convert to required feature depth
    x = Conv2D(feature_size, (3, 3), activation='relu',
               padding='same')(input_tensor)

    conv_x2 = utils.res_block(x, feature_size, kernel=5)
    conv_x2 = utils.res_block(conv_x2, feature_size, kernel=5)
    conv_x3 = utils.res_block(x, feature_size, kernel=5)
    conv_x3 = utils.res_block(conv_x3, feature_size, kernel=5)
    conv_x4 = utils.res_block(x, feature_size, kernel=5)
    conv_x4 = utils.res_block(conv_x4, feature_size, kernel=5)

    x = add([conv_x2, conv_x3, conv_x4])

    # Add the residual blocks to the model
    for i in range(num_layers):
        x = utils.res_block(x, feature_size)

    x = Conv2D(feature_size, (3, 3), padding='same')(x)

    # Upsample output of the convolution
    x2 = utils.upsample(add([x, conv_x2]), 2, feature_size)
    x3 = utils.upsample(add([x, conv_x3]), 3, feature_size)
    x4 = utils.upsample(add([x, conv_x4]), 4, feature_size)

    outputs = [x2, x3, x4]

    model = Model(inputs=input_tensor, outputs=outputs, name="MDSR")
    return model
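A hedged sketch of the `utils.res_block(x, feature_size, kernel=...)` helper the model above relies on: two convolutions with a ReLU in between, added back onto the block input (the EDSR/MDSR-style residual block). The kernel handling and the optional residual scaling shown here are assumptions, not the project's actual implementation.

from tensorflow.keras.layers import Add, Conv2D, Lambda

def res_block(x_in, feature_size, kernel=3, scale=1.0):
    # conv -> relu -> conv, then add the result back onto the input
    x = Conv2D(feature_size, (kernel, kernel), padding='same', activation='relu')(x_in)
    x = Conv2D(feature_size, (kernel, kernel), padding='same')(x)
    if scale != 1.0:
        x = Lambda(lambda t: t * scale)(x)  # residual scaling (EDSR uses 0.1)
    return Add()([x_in, x])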
Example #2
def Generator():
    # For convolutions arithmetics see: https://arxiv.org/abs/1603.07285

    down_stack = [
        downsample(64, 4, apply_batch_norm=False, strides=3,
                   name='down_0'),  # (bs, 8, 8, 64)
        downsample(128, 4, name='down_1'),  # (bs, 4, 4, 128)
        downsample(256, 3, strides=1, name='down_2'),  # (bs, 4, 4, 256)
        downsample(512, 3, name='down_3'),  # (bs, 2, 2, 512)
        downsample(512, 4, name='down_4'),  # (bs, 1, 1, 512)
    ]

    up_stack = [
        upsample(512, 4, apply_dropout=True, name='up_0'
                 ),  # (bs, 2, 2, 512) - after concatenation (bs, 2, 2, 1024)
        upsample(256, 4, apply_dropout=True, name='up_1'
                 ),  # (bs, 4, 4, 256) - after concatenation (bs, 4, 4, 512)
        upsample(128, 4, apply_dropout=True, strides=1, name='up_2'
                 ),  # (bs, 4, 4, 128) - after concatenation (bs, 4, 4, 256)
        upsample(64,
                 2,
                 apply_dropout=True,
                 strides=2,
                 name='up_3',
                 padding='valid'
                 ),  # (bs, 8, 8, 64) - after concatenation (bs, 8, 8, 128)
    ]

    initializer = tf.random_normal_initializer(0., 0.02)
    last = tf.keras.layers.Conv2DTranspose(
        OUTPUT_CHANNELS,
        8,
        strides=2,
        padding='valid',
        kernel_initializer=initializer,
        activation='tanh')  # (bs, 22, 22, 3)

    concat = tf.keras.layers.Concatenate()

    inputs = tf.keras.layers.Input(shape=[None, None, 3])
    x = inputs

    # Downsampling through the model
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)

    skips = reversed(skips[:-1])

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = concat([x, skip])

    x = last(x)

    return tf.keras.Model(inputs=inputs, outputs=x)
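The Generator above assumes pix2pix-style `downsample`/`upsample` building blocks. A minimal sketch of what they might look like (Conv2D / Conv2DTranspose with optional batch norm and dropout) is shown below; the initializer, normalization choices, and keyword names are assumptions rather than the original project's code.

import tensorflow as tf

def downsample(filters, size, apply_batch_norm=True, strides=2, name=None):
    initializer = tf.random_normal_initializer(0., 0.02)
    block = tf.keras.Sequential(name=name)
    block.add(tf.keras.layers.Conv2D(filters, size, strides=strides, padding='same',
                                     kernel_initializer=initializer, use_bias=False))
    if apply_batch_norm:
        block.add(tf.keras.layers.BatchNormalization())
    block.add(tf.keras.layers.LeakyReLU())
    return block

def upsample(filters, size, apply_dropout=False, strides=2, padding='same', name=None):
    initializer = tf.random_normal_initializer(0., 0.02)
    block = tf.keras.Sequential(name=name)
    block.add(tf.keras.layers.Conv2DTranspose(filters, size, strides=strides, padding=padding,
                                              kernel_initializer=initializer, use_bias=False))
    block.add(tf.keras.layers.BatchNormalization())
    if apply_dropout:
        block.add(tf.keras.layers.Dropout(0.5))
    block.add(tf.keras.layers.ReLU())
    return block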
Example #3
def JPEG_decompression(data, channels=3):
	#Meta Data
	height = data[-1]
	width = data[-2]
	quality = data[-3]
	d_height = data[-4]
	d_width = data[-5]

	#Remove Meta Data
	data = data[:-5]
	#Unzigzag
	data_z = zigzag_decode(data, d_height, d_width, channels)
	#Unquantize
	im_q = unquantize(data_z,quality)
	#IDCT
	im_idct = idct_2d(im_q)
	#Unblock and Unpad
	im = unblock_image(im_idct,d_height,d_width)
	#Undo offset and return to RGB
	im[:,:,[1,2]] -= 128
	im = lab2rgb(im) * 255 # lab2rgb converts to float64
	#Undo Padding
	im = im[:d_height,:d_width]

	#Upsample
	im = utils.upsample(im,(height,width))

	return im.astype(np.uint8)
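The final `utils.upsample(im, (height, width))` call restores the chroma-subsampled image to its original resolution. A minimal stand-in, assuming the helper simply resizes an H x W x C array to the target size (skimage is a guess here; the original utility may use a different backend):

from skimage.transform import resize

def upsample(im, size):
    height, width = size
    # preserve_range keeps the 0-255 values instead of rescaling to [0, 1]
    return resize(im, (height, width), order=3, preserve_range=True)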
Example #4
def uNet(images,
         img_size=(512, 512),
         out_channels=128,
         views=2,
         normalizer_fn=tf_layers.batch_norm,
         activation=tf.nn.leaky_relu):
    """
    images: n*h*w*c

    Returns:
    [?, h, w, out_channels]
    list of heatmaps:
        heatmap[0]=top
        heatmap[1]=bottom
    """
    with tf.name_scope("model"):
        images = tf.reshape(images, [-1, img_size[0], img_size[1], 3])
        #images = tf.cast(images, tf.float32)
        with tf.variable_scope("encoder"):
            with framework.arg_scope([tf_layers.conv2d],
                                     kernel_size=3,
                                     stride=2,
                                     normalizer_fn=normalizer_fn,
                                     activation_fn=tf.nn.leaky_relu,
                                     padding="same"):
                e1 = tf_layers.conv2d(images, num_outputs=64)  # 256 x 256 x 64

                e2 = tf_layers.conv2d(e1, num_outputs=128)  # 128x128x128

                e3 = tf_layers.conv2d(e2, num_outputs=128)  # 64x64x128

                e4 = tf_layers.conv2d(e3, num_outputs=256)  # 32x32x256

                e5 = tf_layers.conv2d(e4, num_outputs=512)  # 16x16x512

                e6 = tf_layers.conv2d(e5, num_outputs=512)  # 8X8X512

                e7 = tf_layers.conv2d(e6, num_outputs=512)  # 4X4X512

                encoded = tf_layers.conv2d(e7, num_outputs=512)  # 2X2X512

        with tf.name_scope("decoders"):
            d6 = utils.upsample(encoded, 512)  # 4X4x512
            d5 = utils.upsample(tf.concat([d6, e7], 3), 512)  # 8X8X512
            d4 = utils.upsample(tf.concat([d5, e6], 3), 256)  # 16x16x256
            d3 = utils.upsample(tf.concat([d4, e5], 3), 256)  # 32x32x256
            d2 = utils.upsample(tf.concat([d3, e4], 3), 128)  # 64x64x128
            d1 = utils.upsample(tf.concat([d2, e3], 3), 128)  # 128x128x128
            d0 = utils.upsample(tf.concat([d1, e2], 3), 128)  # 256x256x128

            decoded = utils.upsample(
                tf.concat([d0, e1], 3),
                out_channels,
                activation_fn=tf.nn.relu,
                normalizer_fn=tf_layers.batch_norm)  # 512x512xout_channels

        return decoded
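The decoder above leans on a `utils.upsample(x, num_outputs, ...)` helper that doubles the spatial size and sets the channel depth. One shape-compatible sketch is nearest-neighbor resizing followed by a 3x3 convolution; this is an assumption, and a transposed convolution would fit the same call sites equally well.

import tensorflow as tf
from tensorflow.contrib import layers as tf_layers

def upsample(x, num_outputs, activation_fn=tf.nn.leaky_relu, normalizer_fn=None):
    # double the spatial resolution, then project to the requested feature depth
    height, width = x.get_shape().as_list()[1:3]
    x = tf.image.resize_nearest_neighbor(x, (height * 2, width * 2))
    return tf_layers.conv2d(x, num_outputs=num_outputs, kernel_size=3, stride=1,
                            padding='SAME', activation_fn=activation_fn,
                            normalizer_fn=normalizer_fn)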
Example #5
    def __init__(self, ):
        super(Generator_, self).__init__()
        self.OUTPUT_CHANNELS = 3
        self.downsample = [
            downsample(64, 4, apply_batchnorm=False),
            downsample(128, 4),  # (bs, 64, 64, 128)
            downsample(256, 4),  # (bs, 32, 32, 256)
            downsample(512, 4),  # (bs, 16, 16, 512)
            downsample(512, 4),  # (bs, 8, 8, 512)
            downsample(512, 4),  # (bs, 4, 4, 512)
            downsample(512, 4),  # (bs, 2, 2, 512)
            downsample(512, 4)
        ]  # (bs, 1, 1, 512)

        self.upsample = [
            upsample(512, 4, apply_dropout=True),  # (bs,2,2,1024)
            upsample(512, 4, apply_dropout=True),  # (bs,4,4,1024)
            upsample(512, 4, apply_dropout=True),  # (bs,8,8,1024)
            upsample(512, 4),  # (bs, 16, 16, 1024)
            upsample(256, 4),  # (bs, 32, 32, 512)
            upsample(128, 4),  # (bs, 64, 64, 256)
            upsample(64, 4)  # (bs, 128, 128, 128)
        ]

        initializer = tf.random_normal_initializer(0., 0.02)
        self.last = tf.keras.layers.Conv2DTranspose(
            self.OUTPUT_CHANNELS,
            4,
            strides=2,
            padding='same',
            kernel_initializer=initializer,
            activation='tanh')  # (bs, 256, 256, 3)
        self.concat = tf.keras.layers.Concatenate()
Example #6
    def gen_G(self, feature_size, num_layers, scale, x):
        image_input, mean_x = self.preprossessing(x, int(self.img_size / 2))
        # One convolution before res blocks and to convert to required feature
        # depth
        # conv  # input ( 32*32 ) output ( 32*32 )
        x = slim.conv2d(image_input, feature_size, [3, 3])

        # Store the output of the first convolution to add later
        conv_1 = x
        """
        This creates `num_layers` number of resBlocks
        a resBlock is defined in the paper as
        (excuse the ugly ASCII graph)
        x
        |\
        | \
        |  conv2d
        |  relu
        |  conv2d
        | /
        |/
        + (addition here)
        |
        result
        """
        """
        Doing scaling here as mentioned in the paper:

        `we found that increasing the number of feature
        maps above a certain level would make the training procedure
        numerically unstable. A similar phenomenon was
        reported by Szegedy et al. We resolve this issue by
        adopting the residual scaling with factor 0.1. In each
        residual block, constant scaling layers are placed after the
        last convolution layers. These modules stabilize the training
        procedure greatly when using a large number of filters.
        In the test phase, this layer can be integrated into the previous
        convolution layer for the computational efficiency.'

        """
        scaling_factor = 0.1

        # Add the residual blocks to the model
        for i in range(num_layers):  # 32        conv_1---conv_64
            x = utils.resBlock(x, feature_size, scale=scaling_factor)

        # One more convolution, and then we add the output of our first conv
        # layer      *******************
        # conv_65                       #   LR -> HR
        x = slim.conv2d(x, feature_size, [3, 3])
        x += conv_1  # add back the residual from the first convolution

        # Upsample output of the convolution
        x = utils.upsample(x, scale, feature_size, None)  # conv_66 conv_67
        x = tf.clip_by_value(x + mean_x, 0.0, 255.0)

        return x
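A minimal sketch of the residual block with constant scaling that the docstrings above describe (conv -> relu -> conv, scaled, then added back to the input). This mirrors the EDSR description; the actual `utils.resBlock` may differ in kernel size or activation details.

import tensorflow as tf
import tensorflow.contrib.slim as slim

def resBlock(x, feature_size, scale=0.1, kernel=[3, 3]):
    tmp = slim.conv2d(x, feature_size, kernel, activation_fn=tf.nn.relu)
    tmp = slim.conv2d(tmp, feature_size, kernel, activation_fn=None)
    return x + scale * tmp  # constant residual scaling stabilizes large models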
Example #7
 def back_propagate(self):
     """
     Refine parameters of this layer with residuals from next layer
     """
     # compute residuals of previous convolutional layer
     img_shape = self.x_imgs[0].shape
     # list comprehension instead of map() so this also works under Python 3,
     # where map() returns an iterator that numpy.asarray would not expand
     self.prev_layer.delta = numpy.asarray([upsample(d, img_shape) for d in self.delta])
     # continue back propagating
     self.prev_layer.back_propagate()
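A hedged sketch of the `upsample(d, img_shape)` helper this back-propagation step depends on, assuming a mean-pooling layer: each pooled residual is spread evenly back over its pooling window. np.kron tiles every value; the final crop guards against shapes that are not exact multiples of the pooling factor.

import numpy as np

def upsample(delta, img_shape):
    # delta has the pooled layer's shape; img_shape is the previous layer's map shape
    factor_r = img_shape[0] // delta.shape[0]
    factor_c = img_shape[1] // delta.shape[1]
    up = np.kron(delta, np.ones((factor_r, factor_c))) / (factor_r * factor_c)
    return up[:img_shape[0], :img_shape[1]]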
Example #8
def preprocess(file_list,
               start,
               end,
               sr=48000,
               scale=6,
               dimension=64,
               stride=8,
               tag='train'):
    random.shuffle(file_list)
    data_size = end - start + 1
    lr_patches = list()
    hr_patches = list()
    dataset_name = None
    for i, wav_path in enumerate(file_list[start:end + 1]):
        if i % 10 == 0:
            print("%s - %d/%d" % (wav_path, i + 1 + start, len(file_list)))

        # Get low sample rate version data for training
        x_hr, fs = librosa.load(wav_path, sr=sr)
        x_len = len(x_hr)
        x_hr = x_hr[:x_len - (x_len % scale)]

        # Down sampling for Low res version
        #x_lr = decimate(x, scale)
        x_lr = np.array(x_hr[0::scale])

        # Upscale using cubic spline Interpolation
        x_lr = upsample(x_lr, scale)

        x_lr = np.reshape(x_lr, (len(x_lr), 1))
        x_hr = np.reshape(x_hr, (len(x_hr), 1))

        for i in range(0, x_lr.shape[0] - dimension, stride):
            lr_patch = x_lr[i:i + dimension]

            #mid = dimension // 2 - stride // 2
            #hr_patch = x_hr[i+mid:i+mid+stride]

            hr_patch = x_hr[i:i + dimension]

            lr_patches.append(lr_patch)
            hr_patches.append(hr_patch)

    hr_len = len(hr_patches)
    lr_len = len(lr_patches)

    hr_patches = np.array(hr_patches[0:hr_len])
    lr_patches = np.array(lr_patches[0:lr_len])

    print('high resolution(Y) dataset shape is ', hr_patches.shape)
    print('low resolution(X) dataset shape is ', lr_patches.shape)

    dataset_name = 'data/asr-ex%d-start%d-end%d-scale%d-sr%d-dim%d-strd%d-%s.h5' % (
        data_size, start, end, scale, sr, dimension, stride, tag)

    return lr_patches, hr_patches, dataset_name
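The `upsample(x_lr, scale)` call above is described as cubic-spline interpolation back to the original sample count. A minimal sketch with scipy follows; the original helper is not shown, so treat the details as assumptions.

import numpy as np
from scipy.interpolate import splrep, splev

def upsample(x_lr, scale):
    x_lr = np.asarray(x_lr).flatten()
    hr_len = len(x_lr) * scale
    known = np.arange(0, hr_len, scale)      # positions of the known low-rate samples
    spline = splrep(known, x_lr)             # fit a cubic spline through them
    return splev(np.arange(hr_len), spline)  # evaluate at every high-rate position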
Example #9
def build_model(scale, num_layers=32, feature_size=256, scaling_factor=0.1):
    input_tensor = Input(shape=(img_size, img_size, channel))

    # One convolution before res blocks and to convert to required feature depth
    x = Conv2D(feature_size, (kernel, kernel),
               activation='relu',
               padding='same',
               name='conv1')(input_tensor)

    # Store the output of the first convolution to add later
    conv_1 = x
    """
    This creates `num_layers` number of resBlocks
    a resBlock is defined in the paper as
    (excuse the ugly ASCII graph)
    x
    |\
    | \
    |  conv2d
    |  relu
    |  conv2d
    | /
    |/
    + (addition here)
    |
    result
    """
    """
    Doing scaling here as mentioned in the paper:
    `we found that increasing the number of feature
    maps above a certain level would make the training procedure
    numerically unstable. A similar phenomenon was
    reported by Szegedy et al. We resolve this issue by
    adopting the residual scaling with factor 0.1. In each
    residual block, constant scaling layers are placed after the
    last convolution layers. These modules stabilize the training
    procedure greatly when using a large number of filters.
    In the test phase, this layer can be integrated into the previous
    convolution layer for the computational efficiency.'
    """

    # Add the residual blocks to the model
    for i in range(num_layers):
        x = utils.res_block(x, feature_size, scale=scaling_factor)

    x = Conv2D(feature_size, (kernel, kernel), padding='same')(x)
    x = Add()([x, conv_1])

    # Upsample output of the convolution
    x = utils.upsample(x, scale, feature_size)

    outputs = x

    model = Model(inputs=input_tensor, outputs=outputs, name="EDSR")
    return model
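A hedged sketch of the `utils.upsample(x, scale, feature_size)` stage: a convolution that multiplies the channel count by scale**2, followed by depth-to-space (pixel shuffle). Real EDSR implementations often stack two x2 stages for scale 4; the layer layout here is an assumption.

import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Lambda

def upsample(x, scale, feature_size):
    x = Conv2D(feature_size * scale ** 2, (3, 3), padding='same')(x)
    # rearrange the extra channels into a scale-times-larger spatial grid
    return Lambda(lambda t: tf.nn.depth_to_space(t, scale))(x)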
Example #10
    def __init__(self,
                 img_size=32,
                 num_layers=32,
                 feature_size=256,
                 scale=2,
                 output_channels=3):
        print("Building EDSR...")
        #Placeholder for image inputs
        self.input = x = tf.placeholder(
            tf.float32, [None, img_size, img_size, output_channels])
        #Placeholder for upscaled image ground-truth
        self.target = y = tf.placeholder(
            tf.float32,
            [None, img_size * scale, img_size * scale, output_channels])

        #One convolution before res blocks and to convert to required feature depth
        x = slim.conv2d(x, feature_size, [3, 3])
        """
		This creates `num_layers` number of resBlocks
		a resBlock is defined in the paper as
		(excuse the ugly ASCII graph)
		x
		|\
		| \
		|  conv2d
		|  relu
		|  conv2d
		| /
		|/
		+ (addition here)
		|
		result
		"""
        for i in range(num_layers):
            x = utils.resBlock(x, feature_size)

        #Two more convolutions on the output of the res blocks
        x = slim.conv2d(x, feature_size, [3, 3])
        x = slim.conv2d(x, output_channels, [3, 3])

        #Upsample output of the convolution
        x = utils.upsample(x, scale, feature_size, None)

        #One final convolution on the upsampling output
        self.out = output = slim.conv2d(x, output_channels, [3, 3])

        self.loss = loss = tf.reduce_mean(
            tf.losses.absolute_difference(y, output))

        #Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
Example #11
    def forward(self, x, noise_sigma):
        noise_map = noise_sigma.view(x.shape[0], 1, 1,
                                     1).repeat(1, x.shape[1], x.shape[2] // 2,
                                               x.shape[3] // 2)

        x_up = utils.downsample(x.data)  # 4 * C * H/2 * W/2
        x_cat = torch.cat((noise_map.data, x_up), 1)  # 4 * (C + 1) * H/2 * W/2
        x_cat = Variable(x_cat)

        h_dncnn = self.intermediate_dncnn(x_cat)
        y_pred = utils.upsample(h_dncnn)
        return y_pred
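The `utils.downsample`/`utils.upsample` pair in this forward pass behaves like the pixel-unshuffle / pixel-shuffle rearrangement used in FFDNet-style denoisers: the input becomes 4x the channels at half resolution, and the network output is rearranged back. A minimal sketch for a factor of 2 (an assumption about the helpers; `F.pixel_unshuffle` also requires a reasonably recent PyTorch):

import torch.nn.functional as F

def downsample(x):
    # (N, C, H, W) -> (N, 4C, H/2, W/2)
    return F.pixel_unshuffle(x, downscale_factor=2)

def upsample(x):
    # (N, 4C, H/2, W/2) -> (N, C, H, W)
    return F.pixel_shuffle(x, upscale_factor=2)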
Example #12
        def model():

            x = tf.contrib.layers.conv2d(images, 64, kernel_size=(3, 3), stride=1, padding='SAME')
            conv1 = x

            for i in range(64):
                x = resBlock(x, 256, 0.1)

            x = tf.contrib.layers.conv2d(x, 64, kernel_size=(3, 3), stride=1, padding='SAME')
            x += conv1
            x = upsample(x, 3, 256, None)
            out = x

            return out
Example #13
def crn(conv5, weights_file):

    # Contextual Reweighting Network

    conv5 = downsample(conv5)
    # g Multiscale Context Filters, dimension is Bx13x13x84
    convg3x3 = conv(conv5,
                    3,
                    3,
                    32,
                    1,
                    1,
                    name='convg3x3',
                    trainable=True,
                    initializer=tf.contrib.layers.xavier_initializer())
    convg5x5 = conv(conv5,
                    5,
                    5,
                    32,
                    1,
                    1,
                    name='convg5x5',
                    trainable=True,
                    initializer=tf.contrib.layers.xavier_initializer())
    convg7x7 = conv(conv5,
                    7,
                    7,
                    20,
                    1,
                    1,
                    name='convg7x7',
                    trainable=True,
                    initializer=tf.contrib.layers.xavier_initializer())
    # convg = tf.concat([convg3x3, convg5x5, convg7x7], 3)
    convg = tf.concat(3, [convg3x3, convg5x5, convg7x7])
    # w Accumulation Weight, 13x13x84 to 13x13x1
    convw = conv(convg,
                 1,
                 1,
                 1,
                 1,
                 1,
                 name='convw',
                 trainable=True,
                 initializer=tf.contrib.layers.xavier_initializer())
    # Bx13x13x1 to BxWxHx1
    m = upsample(convw)

    return m
Example #14
def main(img_id):
    model = UNet(n_channels=1, n_classes=1)
    #model_file = max(glob.glob('models/*'), key=os.path.getctime) #detect latest version
    model_file = 'models/intensity_filtering_continued/Checkpoint_e1_d0.9755_l0.0008_2018-11-13_11:06:28.pth'  #best one!!
    model.load_state_dict(torch.load(model_file))
    model = model.double()
    img_path = 'data/testing/slices/img/'
    img_vol_path = 'data/testing/img/'  #this is for getting an accurate header
    data_test = [img_path, img_id]
    test_loader = torch.utils.data.DataLoader(img_loader(data_test))
    hdr = nib.load(img_path + img_id).header
    vol_hdr = nib.load(img_vol_path + img_id[0:7] + '.nii.gz').header
    hdr['pixdim'] = vol_hdr[
        'pixdim']  #explicitly set this to force it to keep the correct pixel dimensions
    prediction = test(model, test_loader).numpy()
    prediction = np.reshape(prediction, (256, 256))
    prediction = upsample(prediction, 2)
    save(prediction, 'data/testing/slices/pred/' + img_id, hdr)
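Here `upsample(prediction, 2)` only needs to rescale a 2-D slice by a factor of 2 before saving. A minimal stand-in using scipy (an assumption about the helper, which may use a different interpolation order):

from scipy.ndimage import zoom

def upsample(img, factor):
    return zoom(img, factor, order=1)  # linear interpolation of the 2-D array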
Example #15
def Propagate_MS(ref, val_F2, val_P2, scales):
    h, w = val_F2.size()[2], val_F2.size()[3]
    msv_E2 = {}
    for sc in scales:
        if sc != 1.0:
            msv_F2, msv_P2 = downsample([val_F2, val_P2], sc)
            msv_F2, msv_P2 = ToCudaVariable([msv_F2, msv_P2], volatile=True)
            r5, r4, r3, r2 = model.module.Encoder(msv_F2, msv_P2)
            e2 = model.module.Decoder(r5, ref[sc], r4, r3, r2)
            msv_E2[sc] = upsample(
                F.softmax(e2[0], dim=1)[:, 1].data.cpu(), (h, w))
        else:
            msv_F2, msv_P2 = ToCudaVariable([val_F2, val_P2], volatile=True)
            r5, r4, r3, r2 = model.module.Encoder(msv_F2, msv_P2)
            e2 = model.module.Decoder(r5, ref[sc], r4, r3, r2)
            msv_E2[sc] = F.softmax(e2[0], dim=1)[:, 1].data.cpu()

    val_E2 = torch.zeros(val_P2.size())
    for sc in scales:
        val_E2 += msv_E2[sc]
    val_E2 /= len(scales)
    return val_E2
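The multi-scale propagation above runs the encoder/decoder at several scales and averages the softmax maps at full resolution, so it needs `downsample`/`upsample` helpers for tensors. A minimal sketch assuming plain bilinear resizing of 4-D (N, C, H, W) inputs for `downsample` and of an (N, h, w) probability map for `upsample`; the original helpers may differ in interpolation mode or alignment.

import torch.nn.functional as F

def downsample(tensors, scale):
    # resize each (N, C, H, W) tensor by the given scale factor
    return [F.interpolate(t, scale_factor=scale, mode='bilinear', align_corners=False)
            for t in tensors]

def upsample(prob_map, size):
    # prob_map: (N, h, w) probability map; size: target (H, W)
    return F.interpolate(prob_map.unsqueeze(1), size=size,
                         mode='bilinear', align_corners=False).squeeze(1)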
Example #16
    def model(self):

        x = tf.contrib.layers.conv2d(self.images,
                                     64,
                                     kernel_size=(3, 3),
                                     stride=1,
                                     padding='SAME')
        conv1 = x

        for i in range(64):
            x = resBlock(x, self.feature_size, scale=self.scaling_factor)

        x = tf.contrib.layers.conv2d(x,
                                     64,
                                     kernel_size=(3, 3),
                                     stride=1,
                                     padding='SAME')
        x += conv1
        x = upsample(x, self.scale, self.feature_size, None)
        out = x

        return out
Example #17
    def __init__(self):
        print("Building MYSR...")
        self.imgsize = config.TRAIN.imgsize
        self.output_channels = config.TRAIN.output_channels
        self.scale = config.TRAIN.scale

        # Placeholder for image inputs
        self.input = x = tf.placeholder(
            tf.float32, [None, None, None, self.output_channels])
        # Placeholder for upscaled image ground-truth
        self.target = y = tf.placeholder(
            tf.float32, [None, None, None, self.output_channels])

        # Input preprocessing
        # result = result / (255. / 2.)
        # TODO: there are ReLU layers later; check whether squashing the input image to [-1, 1] is appropriate
        # result = result - 1.
        image_input = x / (255. / 2.)
        image_input = image_input - 1
        image_target = y / (255. / 2.)
        image_target = image_target - 1

        # Even when squashed to [-1, 1] the conv layers still seem to adapt well; just note the last layer must not use relu, since relu's output range is [0, x]
        # ENCODER
        # Entry point
        x = slim.conv2d(image_input, 64, [5, 5])  # use a slightly larger kernel at the entry?
        conv_1 = x

        # ENCODER-resBlock-64
        scaling_factor = 1
        for i in range(3):
            x = utils.resBlock(x, 64, scale=scaling_factor)

        x = slim.conv2d(x, 128, [3, 3])  # widen to 128 feature maps

        # ENCODER-resBlock-128
        scaling_factor = 1
        for i in range(4):
            x = utils.resBlock(x, 128, scale=scaling_factor)

        x = slim.conv2d(x, 256, [3, 3])  # widen to 256 feature maps

        # ENCODER-resBlock-256
        scaling_factor = 1
        for i in range(5):
            x = utils.resBlock(x, 256, scale=scaling_factor)

        # Upsample output of the convolution
        x = utils.upsample(x, self.scale, 128, None)

        # DECODER-resBlock-64
        scaling_factor = 0.1
        for i in range(4):
            x = utils.resBlock(x, 64, scale=scaling_factor)

        # DECODER-resBlock-32
        scaling_factor = 0.1
        for i in range(3):
            x = utils.resBlock(x, 32, scale=scaling_factor)

        # DECODER-resBlock-16
        scaling_factor = 0.1
        for i in range(2):
            x = utils.resBlock(x, 16, scale=scaling_factor)

        # DECODER-resBlock-8
        scaling_factor = 0.1
        for i in range(1):
            x = utils.resBlock(x, 8, scale=scaling_factor)

        # DECODER-output
        # TODO: using a plain conv here might break the progressive refinement structure? The reduced number of feature maps could lower the fine detail?
        x = slim.conv2d(x, self.output_channels, [3, 3])

        output = x

        # Result
        self.out = tf.clip_by_value((output + 1) * (255. / 2.), 0.0, 255.0)
        self.loss = loss = tf.reduce_mean(
            tf.losses.absolute_difference(image_target, output))

        # Calculating Peak Signal-to-noise-ratio
        # Using equations from here: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
        mse = tf.reduce_mean(tf.squared_difference(image_target, output))
        PSNR = tf.constant(255**2, dtype=tf.float32) / mse
        PSNR = tf.constant(10, dtype=tf.float32) * utils.log10(PSNR)

        # Scalar to keep track for loss
        tf.summary.scalar("loss", self.loss)
        tf.summary.scalar("PSNR", PSNR)
        # Image summaries for input, target, and output
        tf.summary.image("input_image", tf.cast(self.input, tf.uint8))
        tf.summary.image("target_image", tf.cast(self.target, tf.uint8))
        tf.summary.image("output_image", tf.cast(self.out, tf.uint8))

        # Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
Example #18
    model = utils.load_model(model_name)
    model.cuda()
    # Get the explainer
    explainer = get_explainer(model, method_name)

    # Transform the image
    inp = transf(raw_img)
    if method_name == 'googlenet':  # swap channel due to caffe weights
        inp_copy = inp.clone()
        inp[0] = inp_copy[2]
        inp[2] = inp_copy[0]
    inp = utils.cuda_var(inp.unsqueeze(0), requires_grad=True)

    target = torch.LongTensor([image_class]).cuda()
    saliency = explainer.explain(inp, target)
    saliency = utils.upsample(saliency, (raw_img.height, raw_img.width))
    #all_saliency_maps.append(saliency.cpy().numpy())
    all_saliency_maps.append(saliency.cpu().numpy())

# Display all the results
plt.figure(figsize=(25, 15))
plt.subplot(3, 5, 1)
plt.imshow(raw_img)
plt.axis('off')
plt.title(displayed_class)
for i, (saliency,
        (model_name, method_name,
         show_style)) in enumerate(zip(all_saliency_maps, model_methods)):
    plt.subplot(3, 5, i + 2 + i // 4)
    if show_style == 'camshow':
        viz.plot_cam(np.abs(saliency).max(axis=1).squeeze(),
Example #19
import numpy as np
import utils

l_valid_np = np.load('../valid.npy')
ab_valid_np = np.load('../valid_est.npy')

res_valid_np = np.ndarray(shape=(100, 256, 256, 3))
ab_valid_np = np.transpose(ab_valid_np, (0, 2, 3, 1))

for i in range(100):
    img_lab = np.concatenate((l_valid_np[i].astype(
        np.uint8), utils.upsample(ab_valid_np[i].astype(np.double))),
                             axis=2)
    img_rgb = utils.cvt2rgb(img_lab)

    img_rgb = img_rgb * 255
    img_rgb = img_rgb.astype(np.uint8)

    res_valid_np[i] = img_rgb

with open('../estimations_valid.npy', 'wb') as file:
    np.save(file, res_valid_np)

l_test_np = np.load('../test.npy')
ab_test_np = np.load('../test_est.npy')

res_test_np = np.ndarray(shape=(100, 256, 256, 3))
ab_test_np = np.transpose(ab_test_np, (0, 2, 3, 1))

for i in range(100):
    img_lab = np.concatenate((l_test_np[i].astype(
Example #20
    def __init__(self,
                 img_size=48,
                 num_layers=32,
                 feature_size=32,
                 scale=2,
                 output_channels=3):
        def _ignore_boundary(images, scale):
            boundary_size = scale + 6
            images = images[:, boundary_size:-boundary_size,
                            boundary_size:-boundary_size, :]
            return images

        def _float32_to_uint8(images):
            images = images * 255.0
            images = tf.round(images)
            images = tf.saturate_cast(images, tf.uint8)
            return images

        def _residual_block(x, feature_size):
            skip = x
            x = conv2d_weight_norm(
                x,
                feature_size * 4,
                3,
                padding='same',
                name='conv0',
            )
            x = tf.nn.relu(x)
            x = conv2d_weight_norm(
                x,
                feature_size,
                3,
                padding='same',
                name='conv1',
            )
            return x + skip

        print("Begin creating wEDSR...")
        self.img_size = img_size  # downscaled image patch size, 48 here
        self.num_layers = num_layers  # number of layers
        self.scale = scale  # x2/x3/x4
        self.output_channels = output_channels  # 3 -> RGB
        self.feature_size = feature_size  # output convolution feature depth
        self.feature_size_first = feature_size * 4  # input convolution feature depth

        #Placeholder for image inputs
        self.input = x = tf.placeholder(
            tf.uint8, [None, img_size, img_size, output_channels])
        #Placeholder for upscaled image ground-truth
        self.target = y = tf.placeholder(
            tf.uint8,
            [None, img_size * scale, img_size * scale, output_channels])

        x = tf.cast(x, tf.float32)
        y = tf.cast(y, tf.float32)

        mean_x = 127  #tf.reduce_mean(self.input)
        image_input = x - mean_x
        mean_y = 127  #tf.reduce_mean(self.target)
        image_target = y - mean_y

        skip = image_input

        #     One convolution before res blocks and to convert to required feature depth
        x = slim.conv2d(
            image_input,
            self.feature_size,
            [3, 3],
        )

        #Add the residual blocks to the model
        for i in range(num_layers):
            x = utils.dirac_conv2d(x,
                                   self.feature_size,
                                   3,
                                   3,
                                   1,
                                   1,
                                   name="dircov_" + str(i) + "_1",
                                   atrainable=True)
            x = tf.nn.relu(x, name="relu_" + str(i))
            # x = utils.dirac_conv2d(x,feature_size,3,3,1,1,name="dircov_"+str(i)+"_2",atrainable=True)
            # x = tf.nn.relu(x, name="relu_1_"+str(i))
            # x = utils.dirac_conv2d_wtf(x,feature_size_first,3,3,1,1,name="dircov_"+str(i)+"_1")
            # x = tf.nn.relu(x, name="relu_"+str(i))
            # x = utils.dirac_conv2d_wtf(x,feature_size,3,3,1,1,name="dircov_"+str(i)+"_2")

        #Upsample output of the convolution
        x = utils.upsample(x, scale, feature_size, None)
        skip = slim.conv2d(
            skip,
            self.feature_size,
            [3, 3],
        )
        skip = utils.upsample(skip,
                              scale,
                              self.feature_size,
                              None,
                              filtersize=[5, 5])

        # x = slim.conv2d(image_input,self.feature_size_first,(3,3), activation_fn=tf.nn.relu)
        # skip = utils.upsample(x,scale,self.feature_size_first,activation=tf.nn.relu,filtersize=[5,5])

        # for i in range(self.num_layers):
        #     x = utils.dirac_conv2d(x,self.feature_size_first,3,3,1,1,name="dircov_"+str(i)+"_1",atrainable=True)
        #     x = tf.nn.relu(x, name="relu_"+str(i))
        #     x = utils.dirac_conv2d(x,feature_size,3,3,1,1,name="dircov_"+str(i)+"_2",atrainable=True)
        #     # x = tf.nn.relu(x, name="relu_"+str(i)+"_2")
        #     # x = utils.dirac_conv2d_wtf(x,self.feature_size_first,3,3,1,1,name="dircov_"+str(i)+"_1")
        #     # x = tf.nn.relu(x, name="relu_"+str(i)+"_1")
        #     # x = utils.dirac_conv2d_wtf(x,self.feature_size,3,3,1,1,name="dircov_"+str(i)+"_2")
        #     # x = tf.nn.relu(x, name="relu_"+str(i)+"_2")
        # x = utils.upsample(x,scale,feature_size,activation=tf.nn.relu,filtersize=[3,3])
        '''
        with tf.variable_scope('skip'):
            # skip = slim.conv2d(image_input,self.feature_size,(3,3), activation_fn=tf.nn.relu)
            skip = utils._subpixel_block(   x,
                                            kernel_size = (5,5),
                                            feature_size= self.output_channels,
                                            scale=self.scale)

        # with tf.variable_scope('input'):
        #     x = conv2d_weight_norm( inputs=image_input,
        #                             filters=self.feature_size,
        #                             kernel_size=3,
        #                             padding='same',
        #                             trainable=True,
        #                             activation=tf.nn.relu)
        for i in range(self.num_layers):
            with tf.variable_scope('layer{}'.format(i)):
                x = _residual_block(x, feature_size=self.feature_size)

        with tf.variable_scope('output'):
            x = utils._subpixel_block(  x, 
                                        kernel_size =(3,3),
                                        feature_size=self.output_channels,
                                        scale=self.scale)
        '''

        x_target = skip + x
        x_target = tf.maximum(x_target, -127)
        x_target = tf.minimum(x_target, 128)

        self.out = tf.saturate_cast(x_target + mean_x, tf.uint8)

        # self.loss = tf.reduce_mean(tf.losses.absolute_difference(image_target,x_target))
        self.loss = tf.reduce_mean(
            tf.losses.mean_squared_error(image_target, x_target))

        psnr = tf.image.psnr(
            _ignore_boundary(self.target, self.scale),
            _ignore_boundary(self.out, self.scale),
            max_val=255,
        )
        self.PSNR = tf.reduce_mean(psnr)
        ssim = tf.image.ssim(
            _ignore_boundary(self.target, self.scale),
            _ignore_boundary(self.out, self.scale),
            max_val=255,
        )
        self.SSIM = tf.reduce_mean(ssim)
        # self.SSIM = tf.metrics.mean(ssim)

        self.bicubics = tf.image.resize_images(
            self.input,
            (self.img_size * self.scale, self.img_size * self.scale),
            tf.image.ResizeMethod.BICUBIC)
        self.bicubics = tf.clip_by_value(self.bicubics, 0.0, 255.0)
        self.bicubics = tf.cast(self.bicubics, tf.uint8)
        bicupsnr = tf.image.psnr(
            _ignore_boundary(self.target, self.scale),
            _ignore_boundary(self.bicubics, self.scale),
            max_val=255,
        )
        self.BIC_PSNR = tf.reduce_mean(bicupsnr)

        tf.summary.scalar("loss", self.loss)
        tf.summary.scalar("Out_PSNR", self.PSNR)
        tf.summary.scalar("ReB_PSNR", self.BIC_PSNR)
        tf.summary.scalar("SSIM", self.SSIM)

        #Image summaries for input, target, and output
        tf.summary.image("input_image", tf.cast(self.input, tf.uint8))
        tf.summary.image("resize_image", tf.cast(self.bicubics, tf.uint8))
        tf.summary.image("target_image", tf.cast(self.target, tf.uint8))
        tf.summary.image("output_image", tf.cast(self.out, tf.uint8))

        #Tensorflow graph setup... session, saver, etc.
        gpu_options = tf.GPUOptions(allow_growth=True)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
        self.saver = tf.train.Saver(max_to_keep=3)
        variables_to_restore = tf.contrib.framework.get_variables_to_restore(
            exclude=['Conv_5', 'Conv_6', 'Conv_4', 'Conv_3', 'Conv_7'])
        self.saver_change = tf.train.Saver(variables_to_restore)

        print("Model creation completed!")
Example #21
def main():

    # LOAD TRAINING IMAGE NAMES
    with open(TRAIN_IMAGENAME_PATH, 'r') as infile:
        global train_imagename
        train_imagename = [line.strip() for line in infile]

    # LOAD VALIDATION IMAGE NAMES
    with open(VALID_IMAGENAME_PATH, 'r') as infile:
        global valid_imagename
        valid_imagename = [line.strip() for line in infile]

    print("-> image names are loaded")
    print()

    # ---------------------------------------------------------------------------------------------------------------- #
    # ---------------------------------------------------------------------------------------------------------------- #

    # LOAD GRAY IMAGES
    gray_images = list()
    for gray_imagename in os.listdir(GRAY_IMAGE_PATH):
        gray_image, s = read_image(GRAY_IMAGE_PATH + gray_imagename)
        gray_image = (gray_imagename, cvt2Lab(gray_image)[0])
        gray_images.append(gray_image)
    gray_images = sorted(gray_images, key=lambda x: x[0])
    print("-> gray images are loded")

    # SPLIT GRAY IMAGES TO TRAIN AND VALIDATION SETS
    x_train, x_valid = np.empty([1, 1, 256, 256]), np.empty([1, 1, 256, 256])
    for gray_imagename, gray_image in gray_images:
        if gray_imagename in train_imagename:
            x_train = np.concatenate(
                [x_train,
                 np.reshape(gray_image, (1, ) + x_train.shape[1:])])
        if gray_imagename in valid_imagename:
            x_valid = np.concatenate(
                [x_valid,
                 np.reshape(gray_image, (1, ) + x_valid.shape[1:])])
    x_train, x_valid = x_train[1:], x_valid[1:]
    print("-> gray images are splitted to datasets")
    print()

    # SAVE TRAINING AND VALIDATION FEATURES
    #np.save("x_train.npy", x_train)
    #print("-> x_train is saved")
    #np.save("x_valid.npy", x_valid)
    #print("-> x_valid is saved")
    #print()

    # ---------------------------------------------------------------------------------------------------------------- #
    # ---------------------------------------------------------------------------------------------------------------- #

    # LOAD 64X64 COLOR IMAGES
    color64_images = list()
    for color64_imagename in os.listdir(COLOR_64_IMAGE_PATH):
        color64_image, s = read_image(COLOR_64_IMAGE_PATH + color64_imagename,
                                      training=True)
        color64_image = (color64_imagename, cvt2Lab(color64_image)[1])
        color64_images.append(color64_image)
    color64_images = sorted(color64_images, key=lambda x: x[0])
    print("-> 64x64 color images are loded")

    # SPLIT 64x64 COLOR IMAGES TO TRAIN AND VALIDATION SETS
    y_train, y_valid = np.empty([1, 64, 64, 2]), np.empty([1, 64, 64, 2])
    for color64_imagename, color64_image in color64_images:
        if color64_imagename in train_imagename:
            y_train = np.concatenate(
                [y_train, np.expand_dims(color64_image, axis=0)])
        if color64_imagename in valid_imagename:
            y_valid = np.concatenate(
                [y_valid, np.expand_dims(color64_image, axis=0)])
    y_train, y_valid = np.rollaxis(y_train[1:], 3,
                                   1), np.rollaxis(y_valid[1:], 3, 1)
    print("-> 64x64 color images are splitted to datasets")
    print()

    # SAVE TRAINING AND VALIDATION LABELS
    #np.save("y_train.npy", y_train)
    #print("-> y_train is saved")
    #np.save("y_valid.npy", y_valid)
    #print("-> y_valid is saved")
    #print()

    # ---------------------------------------------------------------------------------------------------------------- #
    # ---------------------------------------------------------------------------------------------------------------- #

    # LOAD TRAINING AND TEST SAMPLES
    #x_train = np.load('x_train.npy')
    #x_valid = np.load('x_valid.npy')
    #y_train = np.load('y_train.npy')
    #y_valid = np.load('y_valid.npy')

    # DISPLAY DATASET SIZE
    train_size = x_train.shape[0]
    valid_size = x_valid.shape[0]
    print("-> train Size : %d" % train_size)
    print("-> valid Size : %d" % valid_size)
    print()

    # SET TRAINING PARAMETERS
    BATCH_SIZE = 50
    EPOCH = 250

    model = ConvNet().cuda()
    loss_fn = torch.nn.MSELoss()
    optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-4)

    ### TRAINING ### TRAINING ### TRAINING ### TRAINING ### TRAINING ### TRAINING ### TRAINING ### TRAINING ### TRAINING ###
    for epoch in range(EPOCH):
        running_train_loss = .0
        running_valid_loss = .0
        for i in range(0, train_size, BATCH_SIZE):
            trainX, trainY = torch.autograd.Variable(torch.from_numpy(x_train[i:i + BATCH_SIZE]).float().cuda(), requires_grad=False),\
                             torch.autograd.Variable(torch.from_numpy(y_train[i:i + BATCH_SIZE]).float().cuda(), requires_grad=False)

            optimizer.zero_grad()

            train_output = model(trainX)
            train_loss = loss_fn(train_output, trainY)

            train_loss.backward()
            optimizer.step()

            running_train_loss += train_loss.data[0]

        for i in range(0, valid_size, BATCH_SIZE):
            validX, validY = torch.autograd.Variable(torch.from_numpy(x_valid[i:i + BATCH_SIZE]).float().cuda(), requires_grad=False),\
                             torch.autograd.Variable(torch.from_numpy(y_valid[i:i + BATCH_SIZE]).float().cuda(), requires_grad=False)

            valid_output = model(validX)
            valid_loss = loss_fn(valid_output, validY)

            running_valid_loss += valid_loss.data[0]

        print("%d\ttrain loss : %s\t%s" %
              (epoch + 1,
               str(
                   format(running_train_loss /
                          (x_train.shape[0] / BATCH_SIZE), '.8g')),
               str(
                   format(running_valid_loss /
                          (x_valid.shape[0] / BATCH_SIZE), '.8g'))))

    # GET VALIDATION PREDICTIONS
    # Shape -> (100, 2, 64, 64)
    pred_valid = np.vstack([model(torch.autograd.Variable(torch.from_numpy(x_valid[i:i + BATCH_SIZE]).float().cuda(),
                                                          requires_grad=False)).cpu().data.numpy() \
                            for i in range(0, valid_size, BATCH_SIZE)])

    # ADJUST VALIDATION PREDICTION DIMENSIONS
    # Shape -> (100, 64, 64, 2)
    pred_valid = np.transpose(pred_valid, (0, 2, 3, 1))

    # UPSAMPLE VALIDATION PREDICTIONS
    # Shape -> (100, 256, 256, 2)
    pred_valid = np.vstack([np.expand_dims(upsample(pred.astype(np.float)), axis=0) \
                            for pred in pred_valid])

    # INSERT LIGHT CHANNEL TO VALIDATION PREDICTIONS
    # Shape -> (100, 256, 256, 3)
    pred_valid = np.vstack([np.expand_dims(np.insert(pred_valid[i], 0, x_valid[i], axis=2), axis=0) \
                            for i in range(len(pred_valid))])

    # CONVERT VALIDATION PREDICTIONS TO RGB IMAGES
    # Shape -> (100, 256, 256, 3)
    pred_valid = np.vstack([np.expand_dims((cvt2rgb(pred) * 255.).astype(np.uint8), axis=0) \
                            for pred in pred_valid])

    np.save('validation_estimations.npy', pred_valid)

    # DISPLAY VALIDATION ACCURACY
    print("Validation acc: ")
    subprocess.call(
        ["python3", "evaluate.py", "validation_estimations.npy", "valid.txt"])
    print()

    try:
        os.remove('validation_estimations.npy')
    except:
        pass

    # SAVE PYTORCH MODEL
    torch.save(model.state_dict(), MODEL_PATH)
    print("-> image colorization model is saved to %s" % MODEL_PATH)

    return
Example #22
	def __init__(self,img_size=32,num_layers=32,feature_size=128,scale=2,output_channels=1):
		print("Building EDSR...")
		self.img_size = img_size
		self.scale = scale
		self.output_channels = output_channels

		#Placeholder for image inputs
		self.input = x = tf.placeholder(tf.float32,[None,img_size,img_size,output_channels])
		#Placeholder for upscaled image ground-truth
		self.target = y = tf.placeholder(tf.float32,[None,img_size*scale,img_size*scale,output_channels])
	
		"""
		Preprocessing as mentioned in the paper, by subtracting the mean
		However, the subtract the mean of the entire dataset they use. As of
		now, I am subtracting the mean of each batch
		"""
		mean_x = tf.reduce_mean(self.input)
		image_input = x - mean_x
		mean_y = tf.reduce_mean(self.target)
		image_target = y - mean_y

		#One convolution before res blocks and to convert to required feature depth
		x = slim.conv2d(image_input,feature_size,[3,3])
	
		#Store the output of the first convolution to add later
		conv_1 = x	

		"""
		This creates `num_layers` number of resBlocks
		a resBlock is defined in the paper as
		(excuse the ugly ASCII graph)
		x
		|\
		| \
		|  conv2d
		|  relu
		|  conv2d
		| /
		|/
		+ (addition here)
		|
		result
		"""

		"""
		Doing scaling here as mentioned in the paper:

		`we found that increasing the number of feature
		maps above a certain level would make the training procedure
		numerically unstable. A similar phenomenon was
		reported by Szegedy et al. We resolve this issue by
		adopting the residual scaling with factor 0.1. In each
		residual block, constant scaling layers are placed after the
		last convolution layers. These modules stabilize the training
		procedure greatly when using a large number of filters.
		In the test phase, this layer can be integrated into the previous
		convolution layer for the computational efficiency.'

		"""
		scaling_factor = 0.1
		
		#Add the residual blocks to the model
		for i in range(num_layers):
			x = utils.resBlock(x,feature_size,scale=scaling_factor)

		#One more convolution, and then we add the output of our first conv layer
		x = slim.conv2d(x,feature_size,[3,3])
		x += conv_1
		
		#Upsample output of the convolution		
		x = utils.upsample(x,scale,feature_size,None)

		#One final convolution on the upsampling output
		output = x  # slim.conv2d(x,output_channels,[3,3])
		self.out = tf.clip_by_value(output+mean_x,0.0,255.0)
		self.loss = loss = tf.reduce_mean(tf.losses.absolute_difference(image_target,output))
	
		#Calculating Peak Signal-to-noise-ratio
		#Using equations from here: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
		mse = tf.reduce_mean(tf.squared_difference(image_target,output))	
		PSNR = tf.constant(255**2,dtype=tf.float32)/mse
		PSNR = tf.constant(10,dtype=tf.float32)*utils.log10(PSNR)
	
		#Scalar to keep track for loss
		tf.summary.scalar("loss",self.loss)
		tf.summary.scalar("PSNR",PSNR)
		#Image summaries for input, target, and output
		tf.summary.image("input_image",tf.cast(self.input,tf.uint8))
		tf.summary.image("target_image",tf.cast(self.target,tf.uint8))
		tf.summary.image("output_image",tf.cast(self.out,tf.uint8))
		
		#Tensorflow graph setup... session, saver, etc.
		self.sess = tf.Session()
		self.saver = tf.train.Saver()
		print("Done building!")
Example #23
def compute_saliency_map(model_name, displayed_class, number_image):
    model_methods = [
        [model_name, 'vanilla_grad', 'imshow'],
        [model_name, 'grad_x_input', 'imshow'],
        [model_name, 'saliency', 'imshow'],
        [model_name, 'integrate_grad', 'imshow'],
        [model_name, 'deconv', 'imshow'],
        [model_name, 'guided_backprop', 'imshow'],
        #[model_name, 'gradcam', 'camshow'],
        #[model_name, 'excitation_backprop', 'camshow'],
        #[model_name, 'contrastive_excitation_backprop', 'camshow']
    ]
    # Change 'image_class' to 0 if you want to display for a dog
    if (displayed_class == "dog"):
        image_class = 0
    elif (displayed_class == "cat"):
        image_class = 1
    else:
        print("ERROR: wrong displayed class")

    # Take the sample image, and display it (original form)
    image_path = "models/test_" + displayed_class + "_images/" + str(
        number_image)

    raw_img = viz.pil_loader(image_path)
    plt.figure(figsize=(5, 5))
    plt.imshow(raw_img)
    plt.axis('off')
    plt.title(displayed_class)

    # Now, we want to display the saliency maps of this image, for every model_method element
    all_saliency_maps = []

    for model_name, method_name, _ in model_methods:
        # Get a specific picture transformation (see torchvision.transforms documentation)
        transf = get_preprocess(model_name, method_name)
        # Load the pretrained model
        model = utils.load_model(model_name)
        model.cuda()
        # Get the explainer
        explainer = get_explainer(model, method_name)

        # Transform the image
        inp = transf(raw_img)
        if method_name == 'googlenet':  # swap channel due to caffe weights
            inp_copy = inp.clone()
            inp[0] = inp_copy[2]
            inp[2] = inp_copy[0]
        inp = utils.cuda_var(inp.unsqueeze(0), requires_grad=True)

        target = torch.LongTensor([image_class]).cuda()
        saliency = explainer.explain(inp, target)
        saliency = utils.upsample(saliency, (raw_img.height, raw_img.width))
        #all_saliency_maps.append(saliency.cpy().numpy())
        all_saliency_maps.append(saliency.cpu().numpy())

    # Display all the results
    plt.figure(figsize=(25, 15))
    plt.subplot(3, 5, 1)
    plt.imshow(raw_img)
    plt.axis('off')
    plt.title(displayed_class)
    for i, (saliency,
            (model_name, method_name,
             show_style)) in enumerate(zip(all_saliency_maps, model_methods)):
        plt.subplot(3, 5, i + 2 + i // 4)
        if show_style == 'camshow':
            viz.plot_cam(np.abs(saliency).max(axis=1).squeeze(),
                         raw_img,
                         'jet',
                         alpha=0.5)
        else:
            if model_name == 'googlenet' or method_name == 'pattern_net':
                saliency = saliency.squeeze()[::-1].transpose(1, 2, 0)
            else:
                saliency = saliency.squeeze().transpose(1, 2, 0)
            saliency -= saliency.min()
            saliency /= (saliency.max() + 1e-20)
            plt.imshow(saliency, cmap='gray')

        plt.axis('off')
        if method_name == 'excitation_backprop':
            plt.title('Exc_bp')
        elif method_name == 'contrastive_excitation_backprop':
            plt.title('CExc_bp')
        else:
            plt.title('%s' % (method_name))

    plt.tight_layout()

    if not os.path.exists('images/' + model_name + '/'):
        os.makedirs('images/' + model_name + '/')
    save_destination = 'images/' + model_name + '/' + str(
        number_image[:-4]) + '_saliency.png'

    plt.savefig(save_destination)
    plt.clf()
Example #24
	def upconv(self, x, scale, feature_size):
		#Upsample output of the convolution		
		with tf.variable_scope('upconv', reuse=False) as vs:
			x = utils.upsample(x, scale, feature_size, None)
		return x
Example #25
    def __init__(self,
                 img_size=32,
                 num_layers=32,
                 feature_size=256,
                 scale=2,
                 output_channels=3):
        print("Building EDSR...")
        #Placeholder for image inputs
        self.input = x = tf.placeholder(
            tf.float32, [None, img_size, img_size, output_channels])
        #Placeholder for upscaled image ground-truth
        self.target = y = tf.placeholder(
            tf.float32,
            [None, img_size * scale, img_size * scale, output_channels])

        #One convolution before res blocks and to convert to required feature depth
        x = slim.conv2d(x, feature_size, [3, 3])

        #Store the output of the first convolution to add later
        conv_1 = x
        """
		This creates `num_layers` number of resBlocks
		a resBlock is defined in the paper as
		(excuse the ugly ASCII graph)
		x
		|\
		| \
		|  conv2d
		|  relu
		|  conv2d
		| /
		|/
		+ (addition here)
		|
		result
		"""
        """
		Doing scaling here as mentioned in the paper:

		`we found that increasing the number of feature
		maps above a certain level would make the training procedure
		numerically unstable. A similar phenomenon was
		reported by Szegedy et al. We resolve this issue by
		adopting the residual scaling with factor 0.1. In each
		residual block, constant scaling layers are placed after the
		last convolution layers. These modules stabilize the training
		procedure greatly when using a large number of filters.
		In the test phase, this layer can be integrated into the previous
		convolution layer for the computational efficiency.'

		"""
        scaling_factor = 1 if feature_size <= 64 else 0.1

        #Add the residual blocks to the model
        for i in range(num_layers):
            x = utils.resBlock(x, feature_size, scale=scaling_factor)

        #One more convolution, and then we add the output of our first conv layer
        x = slim.conv2d(x, feature_size, [3, 3])
        x += conv_1

        #Upsample output of the convolution
        x = utils.upsample(x, scale, feature_size, None)

        #One final convolution on the upsampling output
        self.out = output = x  # slim.conv2d(x,output_channels,[3,3])

        self.loss = loss = tf.reduce_mean(
            tf.losses.absolute_difference(y, output))

        #Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
Example #26
    def findPeaks(self, calib, evt, thr_high=None, thr_low=None):

        if facility == 'LCLS':
            if self.streakMask_on:  # make new streak mask
                self.streakMask = self.StreakMask.getStreakMaskCalib(evt)

            # Apply background correction
            if self.medianFilterOn:
                calib -= median_filter_ndarr(calib, self.medianRank)

            if self.radialFilterOn:
                self.pf.shape = calib.shape  # FIXME: shape is 1d
                calib = self.rb.subtract_bkgd(calib * self.pf)
                calib.shape = self.userPsanaMask.shape  # FIXME: shape is 1d

            self.calib = calib  # save background subtracted calib as an attribute

            if self.streakMask is not None:
                self.combinedMask = self.userPsanaMask * self.streakMask
            else:
                self.combinedMask = self.userPsanaMask

            # set new mask
            #self.alg.set_mask(self.combinedMask) # This doesn't work reliably
        elif facility == 'PAL':
            self.combinedMask = self.userMask

        # set algorithm specific parameters
        if self.algorithm == 1:
            if facility == 'LCLS':
                #print "param: ", self.npix_min, self.npix_max, self.atot_thr, self.son_min, thr_low, thr_high, np.sum(self.combinedMask)
                # v1 - aka Droplet Finder - two-threshold peak-finding algorithm in restricted region
                #                           around pixel with maximal intensity.
                if thr_high is None:  # use gui input
                    self.peakRadius = int(self.hitParam_alg1_radius)
                    self.peaks = self.alg.peak_finder_v4r3(
                        calib,
                        thr_low=self.hitParam_alg1_thr_low,
                        thr_high=self.hitParam_alg1_thr_high,
                        rank=self.hitParam_alg1_rank,
                        r0=self.hitParam_alg1_radius,
                        dr=self.hitParam_alg1_dr,
                        mask=self.combinedMask.astype(np.uint16))
#                    self.peaks = self.alg.peak_finder_v4r2(calib,
#                                                           thr_low=self.hitParam_alg1_thr_low,
#                                                           thr_high=self.hitParam_alg1_thr_high,
#                                                           rank=self.hitParam_alg1_rank,
#                                                           r0=self.hitParam_alg1_radius,
#                                                           dr=self.hitParam_alg1_dr)
                else:
                    self.peaks = self.alg.findPeaks(calib,
                                                    npix_min=self.npix_min,
                                                    npix_max=self.npix_max,
                                                    atot_thr=self.atot_thr,
                                                    son_min=self.son_min,
                                                    thr_low=thr_low,
                                                    thr_high=thr_high,
                                                    mask=self.combinedMask)


#                    self.peaks = self.alg.peak_finder_v4r2(calib,
#                                                           thr_low=thr_low,
#                                                           thr_high=thr_high,
#                                                           rank=self.hitParam_alg1_rank,
#                                                           r0=self.hitParam_alg1_radius,
#                                                           dr=self.hitParam_alg1_dr)
            elif facility == 'PAL':
                self.peakRadius = int(self.hitParam_alg1_radius)
                _calib = np.zeros((1, calib.shape[0], calib.shape[1]))
                _calib[0, :, :] = calib
                if self.combinedMask is None:
                    _mask = None
                else:
                    _mask = self.combinedMask.astype(np.uint16)

                self.peaks = self.alg.findPeaks(
                    _calib,
                    npix_min=self.npix_min,
                    npix_max=self.npix_max,
                    son_min=self.son_min,
                    thr_low=self.hitParam_alg1_thr_low,
                    thr_high=self.hitParam_alg1_thr_high,
                    atot_thr=self.atot_thr,
                    r0=self.peakRadius,
                    dr=int(self.hitParam_alg1_dr),
                    mask=_mask)
        elif self.algorithm == 2:
            if facility == 'LCLS':
                #print "param: ", self.npix_min, self.npix_max, self.atot_thr, self.son_min, thr_low, thr_high, np.sum(self.combinedMask)
                # v1 - aka Droplet Finder - two-threshold peak-finding algorithm in restricted region
                #                           around pixel with maximal intensity.
                self.peakRadius = int(self.hitParam_alg1_radius)
                self.peaks = self.alg.peak_finder_v3r3(
                    calib,
                    rank=int(self.hitParam_alg1_rank),
                    r0=self.peakRadius,
                    dr=self.hitParam_alg1_dr,
                    nsigm=self.son_min,
                    mask=self.combinedMask.astype(np.uint16))
        elif self.algorithm == 3:
            if facility == 'LCLS':
                # perform binning here
                binr = 2
                binc = 2
                downCalib = sm.block_reduce(calib,
                                            block_size=(1, binr, binc),
                                            func=np.sum)
                downWeight = sm.block_reduce(self.combinedMask,
                                             block_size=(1, binr, binc),
                                             func=np.sum)
                warr = np.zeros_like(downCalib, dtype='float32')
                ind = np.where(downWeight > 0)
                warr[ind] = downCalib[ind] / downWeight[ind]
                upCalib = utils.upsample(warr, calib.shape, binr, binc)
                self.peakRadius = int(self.hitParam_alg1_radius)
                self.peaks = self.alg.peak_finder_v3r3(
                    upCalib,
                    rank=int(self.hitParam_alg1_rank),
                    r0=self.peakRadius,
                    dr=self.hitParam_alg1_dr,
                    nsigm=self.son_min,
                    mask=self.combinedMask.astype(np.uint16))

        self.numPeaksFound = self.peaks.shape[0]

        if self.numPeaksFound > 0:
            if facility == 'LCLS':
                cenX = self.iX[np.array(self.peaks[:, 0], dtype=np.int64),
                               np.array(self.peaks[:, 1], dtype=np.int64),
                               np.array(self.peaks[:, 2], dtype=np.int64)] + 0.5
                cenY = self.iY[np.array(self.peaks[:, 0], dtype=np.int64),
                               np.array(self.peaks[:, 1], dtype=np.int64),
                               np.array(self.peaks[:, 2], dtype=np.int64)] + 0.5
            elif facility == 'PAL':
                cenX = self.iX[np.array(self.peaks[:, 1], dtype=np.int64),
                               np.array(self.peaks[:, 2], dtype=np.int64)] + 0.5
                cenY = self.iY[np.array(self.peaks[:, 1], dtype=np.int64),
                               np.array(self.peaks[:, 2], dtype=np.int64)] + 0.5
            self.maxRes = getMaxRes(cenX, cenY, self.cx, self.cy)
        else:
            self.maxRes = 0

        if self.numPeaksFound >= 15:
            if self.powderHits is None:
                self.powderHits = calib
            else:
                self.powderHits = np.maximum(self.powderHits, calib)
        else:
            if self.powderMisses is None:
                self.powderMisses = calib
            else:
                self.powderMisses = np.maximum(self.powderMisses, calib)

        if self.powderHits is None: self.powderHits = np.zeros_like(calib)
        if self.powderMisses is None: self.powderMisses = np.zeros_like(calib)
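
The binning branch of algorithm 3 above sums the calibrated image and the mask over 2x2 blocks, divides to get a mask-weighted block average, and then maps the result back onto the original detector shape before running the peak finder. `utils.upsample(warr, calib.shape, binr, binc)` is not defined in this listing; below is a minimal sketch, assuming the helper simply repeats each binned value over its block and crops to the target shape (the function name here is hypothetical):

import numpy as np

def upsample_by_repetition(binned, target_shape, binr, binc):
    # Repeat every binned value binr x binc times along the row/column axes,
    # then crop back to the original (segments, rows, cols) detector shape.
    up = np.repeat(np.repeat(binned, binr, axis=-2), binc, axis=-1)
    return up[..., :target_shape[-2], :target_shape[-1]]

# Usage mirroring the call above:
# upCalib = upsample_by_repetition(warr, calib.shape, binr, binc)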
Пример #27
0
    def updateClassification(self):
        if self.parent.calib is not None:
            if self.parent.mk.streakMaskOn:
                self.parent.mk.initMask()
                self.parent.mk.streakMask = self.parent.mk.StreakMask.getStreakMaskCalib(self.parent.evt)
                if self.parent.mk.streakMask is None:
                    self.parent.mk.streakMaskAssem = None
                else:
                    self.parent.mk.streakMaskAssem = self.parent.det.image(self.parent.evt, self.parent.mk.streakMask)
                self.algInitDone = False

            self.parent.mk.displayMask()

            # update combined mask
            self.parent.mk.combinedMask = np.ones_like(self.parent.calib)
            if self.parent.mk.streakMask is not None and self.parent.mk.streakMaskOn is True:
                self.parent.mk.combinedMask *= self.parent.mk.streakMask
            if self.parent.mk.userMask is not None and self.parent.mk.userMaskOn is True:
                self.parent.mk.combinedMask *= self.parent.mk.userMask
            if self.parent.mk.psanaMask is not None and self.parent.mk.psanaMaskOn is True:
                self.parent.mk.combinedMask *= self.parent.mk.psanaMask

            # Peak output (0-16):
            # 0 seg
            # 1 row
            # 2 col
            # 3 npix: no. of pixels in the ROI intensities above threshold
            # 4 amp_max: max intensity
            # 5 amp_tot: sum of intensities
            # 6,7: row_cgrav: center of mass
            # 8,9: row_sigma
            # 10,11,12,13: minimum bounding box
            # 14: background
            # 15: noise
            # 16: signal over noise
            if self.algorithm == 0: # No peak algorithm
                self.peaks = None
                self.drawPeaks()
            else:
                # Only initialize the hit finder algorithm once
                if self.algInitDone is False:
                    self.windows = None
                    self.alg = []
                    # set peak-selector parameters:
                    if self.algorithm == 1:
                        self.alg = PyAlgos(mask=None, pbits=0)
                        self.peakRadius = int(self.hitParam_alg1_radius)
                        self.alg.set_peak_selection_pars(npix_min=self.hitParam_alg1_npix_min, npix_max=self.hitParam_alg1_npix_max, \
                                                amax_thr=self.hitParam_alg1_amax_thr, atot_thr=self.hitParam_alg1_atot_thr, \
                                                son_min=self.hitParam_alg1_son_min)
                    elif self.algorithm == 2:
                        self.alg = PyAlgos(mask=None, pbits=0)
                        self.peakRadius = int(self.hitParam_alg1_radius)
                        self.alg.set_peak_selection_pars(npix_min=self.hitParam_alg1_npix_min, npix_max=self.hitParam_alg1_npix_max, \
                                                amax_thr=self.hitParam_alg1_amax_thr, atot_thr=self.hitParam_alg1_atot_thr, \
                                                son_min=self.hitParam_alg1_son_min)
                    elif self.algorithm == 3:
                        self.alg = PyAlgos(mask=None, pbits=0)
                        self.peakRadius = int(self.hitParam_alg1_radius)
                        self.alg.set_peak_selection_pars(npix_min=self.hitParam_alg1_npix_min, npix_max=self.hitParam_alg1_npix_max, \
                                                amax_thr=self.hitParam_alg1_amax_thr, atot_thr=self.hitParam_alg1_atot_thr, \
                                                son_min=self.hitParam_alg1_son_min)
                    ix = self.parent.det.indexes_x(self.parent.evt)
                    iy = self.parent.det.indexes_y(self.parent.evt)
                    self.iX = np.array(ix, dtype=np.int64)
                    self.iY = np.array(iy, dtype=np.int64)

                    self.algInitDone = True

                self.parent.calib = self.parent.calib * 1.0  # Necessary when int is returned

                if self.algorithm == 1:
                    # v1 - aka Droplet Finder - two-threshold peak-finding algorithm in restricted region
                    #                           around pixel with maximal intensity.
                    if not self.turnOnAutoPeaks:
                        self.peakRadius = int(self.hitParam_alg1_radius)
                        self.peaks = self.alg.peak_finder_v4r3(self.parent.calib,
                                                               thr_low=self.hitParam_alg1_thr_low,
                                                               thr_high=self.hitParam_alg1_thr_high,
                                                               rank=int(self.hitParam_alg1_rank),
                                                               r0=self.peakRadius,
                                                               dr=self.hitParam_alg1_dr,
                                                               mask=self.parent.mk.combinedMask.astype(np.uint16))
                    else:
                        ################################
                        # Determine thr_high and thr_low
                        if self.ind is None:
                            with pg.BusyCursor():
                                powderSumFname = self.parent.psocakeRunDir + '/' + \
                                            self.parent.experimentName + '_' + \
                                            str(self.parent.runNumber).zfill(4) + '_' + \
                                            self.parent.detInfo + '_mean.npy'
                                if not os.path.exists(powderSumFname):
                                    # Generate powder
                                    cmd = "mpirun -n 6 generatePowder exp=" + self.parent.experimentName + \
                                          ":run=" + str(self.parent.runNumber) + " -d " + self.parent.detInfo + \
                                          " -o " + self.parent.psocakeRunDir
                                    cmd += " -n " + str(256)
                                    cmd += " --random"
                                    print("Running on local machine: ", cmd)
                                    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                                               shell=True)
                                    out, err = process.communicate()
                                # Copy as background.npy
                                import shutil
                                shutil.copyfile(powderSumFname, self.parent.psocakeRunDir + '/background.npy')

                                # Read in powder pattern and calculate pixel indices
                                powderSum = np.load(powderSumFname)
                                powderSum1D = powderSum.ravel()
                                cy, cx = self.parent.det.indexes_xy(self.parent.evt)
                                #ipx, ipy = self.parent.det.point_indexes(self.parent.evt, pxy_um=(0, 0))
                                try:
                                    ipy, ipx = self.parent.det.point_indexes(self.parent.evt, pxy_um=(0, 0),
                                                                          pix_scale_size_um=None,
                                                                          xy0_off_pix=None,
                                                                          cframe=gu.CFRAME_PSANA, fract=True)
                                except AttributeError:
                                    ipx, ipy = self.parent.det.point_indexes(self.parent.evt, pxy_um=(0, 0))
                                r = np.sqrt((cx - ipx) ** 2 + (cy - ipy) ** 2).ravel().astype(int)
                                startR = 0
                                endR = np.max(r)
                                profile = np.zeros(endR - startR, )
                                for i, val in enumerate(np.arange(startR, endR)):
                                    ind = np.where(r == val)[0].astype(int)
                                    if len(ind) > 0: profile[i] = np.mean(powderSum1D[ind])
                                myThreshInd = np.argmax(profile)
                                print("###################################################")
                                print("Solution scattering radius (pixels): ", myThreshInd)
                                print("###################################################")
                                thickness = 10
                                indLo = np.where(r >= myThreshInd - thickness / 2.)[0].astype(int)
                                indHi = np.where(r <= myThreshInd + thickness / 2.)[0].astype(int)
                                self.ind = np.intersect1d(indLo, indHi)

                                ix = self.parent.det.indexes_x(self.parent.evt)
                                iy = self.parent.det.indexes_y(self.parent.evt)
                                self.iX = np.array(ix, dtype=np.int64)
                                self.iY = np.array(iy, dtype=np.int64)

                        calib1D = self.parent.calib.ravel()
                        mean = np.mean(calib1D[self.ind])
                        spread = np.std(calib1D[self.ind])
                        highSigma = 3.5
                        lowSigma = 2.5
                        thr_high = int(mean + highSigma * spread + 50)
                        thr_low = int(mean + lowSigma * spread + 50)

                        self.peaks = self.alg.findPeaks(self.parent.calib,
                                                        npix_min=self.hitParam_alg1_npix_min,
                                                        npix_max=self.hitParam_alg1_npix_max,
                                                        atot_thr=0, son_min=self.hitParam_alg1_son_min,
                                                        thr_low=thr_low,
                                                        thr_high=thr_high,
                                                        mask=self.parent.mk.combinedMask.astype(np.uint16))
                elif self.algorithm == 2:
                    # v2 - aka Adaptive peak finder
                    self.peaks = self.alg.peak_finder_v3r3(self.parent.calib,
                                                           rank=int(self.hitParam_alg1_rank),
                                                           r0=self.peakRadius,
                                                           dr=self.hitParam_alg1_dr,
                                                           nsigm=self.hitParam_alg1_son_min,
                                                           mask=self.parent.mk.combinedMask.astype(np.uint16))  # thr=self.hitParam_alg2_thr, r0=self.peakRadius, dr=self.hitParam_alg2_dr
                elif self.algorithm == 3:
                    # perform binning here
                    binr = 2
                    binc = 2
                    downCalib = sm.block_reduce(self.parent.calib, block_size=(1, binr, binc), func=np.sum)
                    downWeight = sm.block_reduce(self.parent.mk.combinedMask, block_size=(1, binr, binc), func=np.sum)
                    warr = np.zeros_like(downCalib, dtype='float32')
                    ind = np.where(downWeight > 0)
                    warr[ind] = downCalib[ind] / downWeight[ind]
                    upCalib = utils.upsample(warr, self.parent.calib.shape, binr, binc)
                    self.peaks = self.alg.peak_finder_v3r3(upCalib,
                                                           rank=int(self.hitParam_alg1_rank),
                                                           r0=self.peakRadius,
                                                           dr=self.hitParam_alg1_dr,
                                                           nsigm=self.hitParam_alg1_son_min,
                                                           mask=self.parent.mk.combinedMask.astype(np.uint16))
                self.numPeaksFound = self.peaks.shape[0]

                if self.numPeaksFound > self.minPeaks and self.numPeaksFound < self.maxPeaks:# and self.turnOnAutoPeaks:
                    cenX = self.iX[np.array(self.peaks[:, 0], dtype=np.int64),
                                   np.array(self.peaks[:, 1], dtype=np.int64),
                                   np.array(self.peaks[:, 2], dtype=np.int64)] + 0.5
                    cenY = self.iY[np.array(self.peaks[:, 0], dtype=np.int64),
                                   np.array(self.peaks[:, 1], dtype=np.int64),
                                   np.array(self.peaks[:, 2], dtype=np.int64)] + 0.5

                    x = cenX - self.parent.cx # args.center[0]
                    y = cenY - self.parent.cy # args.center[1]

                    pixSize = float(self.parent.det.pixel_size(self.parent.evt))
                    detdis = float(self.parent.detectorDistance)
                    z = detdis / pixSize * np.ones(x.shape)  # pixels
                    wavelength = 12.407002 / float(self.parent.photonEnergy)  # Angstrom
                    norm = np.sqrt(x ** 2 + y ** 2 + z ** 2)
                    qPeaks = (np.array([x, y, z]) / norm - np.array([[0.], [0.], [1.]])) / wavelength
                    [meanClosestNeighborDist, self.pairsFoundPerSpot] = self.calculate_likelihood(qPeaks)
                else:
                    self.pairsFoundPerSpot = 0
                if self.parent.args.v >= 1: print("Num peaks found: ", self.numPeaksFound, self.peaks.shape, self.pairsFoundPerSpot)

                # update clen
                self.parent.geom.updateClen(self.parent.facility)

                self.parent.index.clearIndexedPeaks()

                # Save image and peaks in cheetah cxi file
                self.saveCheetahFormat(self.parent.facility)

                if self.parent.index.showIndexedPeaks: self.parent.index.updateIndex()

                self.drawPeaks()
            if self.parent.args.v >= 1: print("Done updateClassification")
Пример #28
0
	def __init__(self,img_size=32,num_layers=32,feature_size=256,scale=2,output_channels=3):
		print("Building EDSR...")
		#Placeholder for image inputs
		self.input = x = tf.placeholder(tf.float32,[None,img_size,img_size,output_channels])
		#Placeholder for upscaled image ground-truth
		self.target = y = tf.placeholder(tf.float32,[None,img_size*scale,img_size*scale,output_channels])
	
		"""
		Preprocessing as mentioned in the paper, by subtracting the mean.
		However, the paper subtracts the mean of the entire dataset;
		for now, I am subtracting the mean of each batch.
		"""
		mean_x = tf.reduce_mean(self.input)
		image_input = x - mean_x
		mean_y = tf.reduce_mean(self.target)
		image_target = y - mean_y

		#One convolution before res blocks and to convert to required feature depth
		x = slim.conv2d(image_input,feature_size,[3,3])
	
		#Store the output of the first convolution to add later
		conv_1 = x	

		"""
		This creates `num_layers` number of resBlocks
		a resBlock is defined in the paper as
		(excuse the ugly ASCII graph)
		x
		|\
		| \
		|  conv2d
		|  relu
		|  conv2d
		| /
		|/
		+ (addition here)
		|
		result
		"""

		"""
		Doing scaling here as mentioned in the paper:

		'we found that increasing the number of feature
		maps above a certain level would make the training procedure
		numerically unstable. A similar phenomenon was
		reported by Szegedy et al. We resolve this issue by
		adopting the residual scaling with factor 0.1. In each
		residual block, constant scaling layers are placed after the
		last convolution layers. These modules stabilize the training
		procedure greatly when using a large number of filters.
		In the test phase, this layer can be integrated into the previous
		convolution layer for the computational efficiency.'

		"""
		scaling_factor = 0.1
		
		#Add the residual blocks to the model
		for i in range(num_layers):
			x = utils.resBlock(x,feature_size,scale=scaling_factor)

		#One more convolution, and then we add the output of our first conv layer
		x = slim.conv2d(x,feature_size,[3,3])
		x += conv_1
		
		#Upsample output of the convolution		
		x = utils.upsample(x,scale,feature_size,None)

		#One final convolution on the upsampling output
		output = x  # slim.conv2d(x,output_channels,[3,3])
		self.out = tf.clip_by_value(output+mean_x,0.0,255.0)

		self.loss = loss = tf.reduce_mean(tf.losses.absolute_difference(image_target,output))
	
		#Calculating Peak Signal-to-noise-ratio
		#Using equations from here: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
		mse = tf.reduce_mean(tf.squared_difference(image_target,output))	
		PSNR = tf.constant(255**2,dtype=tf.float32)/mse
		PSNR = tf.constant(10,dtype=tf.float32)*utils.log10(PSNR)
	
		#Scalar to keep track for loss
		tf.summary.scalar("loss",self.loss)
		tf.summary.scalar("PSNR",PSNR)
		#Image summaries for input, target, and output
		tf.summary.image("input_image",tf.cast(self.input,tf.uint8))
		tf.summary.image("target_image",tf.cast(self.target,tf.uint8))
		tf.summary.image("output_image",tf.cast(self.out,tf.uint8))
		
		#Tensorflow graph setup... session, saver, etc.
		self.sess = tf.Session()
		self.saver = tf.train.Saver()
		print("Done building!")
Пример #29
0
    def network(self):
        """
        Create the HRNET network
        """
        conv_input = conv2d(self.x, self.num_kernels,
                            self.kernel_size, var_scope='conv_input')
        bn = batch_norm(conv_input, self.is_train, var_scope='bn_input')
        act = lrelu(bn)
        branch_one_resnet_one = resnet_unit(
            act, self.num_kernels, self.is_train, var_scope='branch_one_resnet_one')

        branch_one_resnet_two = resnet_unit(
            branch_one_resnet_one, 2*self.num_kernels, self.is_train,
            var_scope='branch_one_resnet_two')

        branch_two_resnet_one = resnet_unit(
            branch_one_resnet_one, 2*self.num_kernels, self.is_train, stride=2,
            var_scope='branch_two_resnet_one')

        branch_one_upsample_shape = tf.shape(self.x)[1:3]
        branch_two_resnet_one_upsample = upsample(
            branch_two_resnet_one, branch_one_upsample_shape)
        branch_one_resnet_three_input = tf.add(
            branch_one_resnet_two, branch_two_resnet_one_upsample)

        branch_one_resnet_three = resnet_unit(
            branch_one_resnet_three_input, self.num_kernels, self.is_train,
            var_scope='branch_one_resnet_three')

        branch_two_conv_one = conv2d(
            branch_one_resnet_two, 2*self.num_kernels, 3, stride=2,
            var_scope='branch_two_conv_one')
        branch_two_bn_one = batch_norm(
            branch_two_conv_one, self.is_train, var_scope='branch_two_bn_one')
        branch_two_act_one = lrelu(branch_two_bn_one)

        branch_two_resnet_two_input = tf.add(
            branch_two_resnet_one, branch_two_act_one)

        branch_two_resnet_two = resnet_unit(
            branch_two_resnet_two_input, 2*self.num_kernels, self.is_train,
            var_scope='branch_two_resnet_two')

        branch_three_resnet_one = resnet_unit(
            branch_two_resnet_two_input, 4*self.num_kernels, self.is_train, stride=2,
            var_scope='branch_three_resnet_two_input')

        branch_two_resnet_two_upsample = upsample(
            branch_two_resnet_two, branch_one_upsample_shape)
        branch_three_resnet_one_upsample = upsample(
            branch_three_resnet_one, branch_one_upsample_shape)

        multi_res_concat = tf.concat(
            (branch_one_resnet_three, branch_two_resnet_two_upsample, branch_three_resnet_one_upsample), axis=-1)

        print('Multi resolution concat shape: ', multi_res_concat.shape)

        output_conv = conv2d(
            multi_res_concat, self.num_classes, self.kernel_size,
            var_scope='output_conv')
        return output_conv
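
The HRNet-style fusion above depends on an `upsample(tensor, size)` helper that brings the lower-resolution branches back to the spatial size of branch one before the element-wise add and the final concatenation. That helper is not shown; assuming it is a plain bilinear resize, a TF1 sketch could be:

import tensorflow as tf

def upsample(x, size_hw):
    # Bilinear resize to the (height, width) of the full-resolution branch;
    # size_hw may be a tensor such as tf.shape(inputs)[1:3].
    return tf.image.resize_bilinear(x, size_hw)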
Пример #30
0
    def __init__(self,
                 img_size=32,
                 global_layers=16,
                 local_layers=8,
                 feature_size=64,
                 scale=2,
                 output_channels=3):
        print("Building RDN...")
        self.img_size = img_size
        self.scale = scale
        self.output_channels = output_channels

        #Placeholder for image inputs
        self.input = x = tf.placeholder(tf.float32,
                                        [None, None, None, output_channels])
        #Placeholder for upscaled image ground-truth
        self.target = y = tf.placeholder(tf.float32,
                                         [None, None, None, output_channels])

        self.is_training = tf.placeholder(tf.bool, name='is_training')
        """
        Preprocessing as mentioned in the paper, by subtracting the mean.
        However, the paper subtracts the mean of the entire dataset;
        here a fixed offset of 127 is subtracted instead.
        """
        mean_x = 127
        image_input = x - mean_x
        mean_y = 127
        image_target = y - mean_y

        scaling_factor = 0.1

        x1 = slim.conv2d(image_input, feature_size, [3, 3])

        x = slim.conv2d(x1, feature_size, [3, 3])

        outputs = []
        for i in range(global_layers):
            x = utils.resDenseBlock(x,
                                    feature_size,
                                    layers=local_layers,
                                    scale=scaling_factor)
            outputs.append(x)

        x = tf.concat(outputs, 3)
        x = slim.conv2d(x, feature_size, [1, 1], activation_fn=None)

        x = slim.conv2d(x, feature_size, [3, 3])

        x = x + x1

        x = utils.upsample(x, scale, feature_size)

        #output = slim.conv2d(x,output_channels,[3,3])
        output = tf.layers.conv2d(x,
                                  output_channels, (3, 3),
                                  padding='same',
                                  use_bias=False)

        #l1 loss
        self.loss = tf.reduce_mean(
            tf.losses.absolute_difference(image_target, output))

        self.out = tf.clip_by_value(output + mean_x, 0.0, 255.0)

        #Calculating Peak Signal-to-noise-ratio
        #Using equations from here: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
        mse = tf.reduce_mean(tf.squared_difference(image_target, output))
        PSNR = tf.constant(255**2, dtype=tf.float32) / mse
        self.PSNR = tf.constant(10, dtype=tf.float32) * utils.log10(PSNR)

        #Scalar to keep track for loss
        tf.summary.scalar("loss", self.loss)
        tf.summary.scalar("PSNR", self.PSNR)
        #Image summaries for input, target, and output
        tf.summary.image("input_image", tf.cast(self.input, tf.uint8))
        tf.summary.image("target_image", tf.cast(self.target, tf.uint8))
        tf.summary.image("output_image", tf.cast(self.out, tf.uint8))

        #Tensorflow graph setup... session, saver, etc.
        self.sess = tf.Session()
        self.saver = tf.train.Saver()
        print("Done building!")
Пример #31
0
def partition_call(region_code, partition, road, railway, river, cluster_flag):
	regcode = region_code

	boundary_path = os.path.join(root_path,'data','source','administrative','boundary')

	geo_boundary = geojson.load(codecs.open(os.path.join(boundary_path, ADMIN_DICT[regcode]+'.geojson'),'r','latin-1'))
	boundary = geo_boundary['features'][0]['geometry']['coordinates'][0]
	inner = parse_inner_coords(regcode, road=road, river=river, rail=railway)

	# print 'before inner upsampling: %s, %s'%(len(inner), ADMIN_DICT[region_code])
	inner = utils.upsample(inner, 0.001)
	# print 'after inner upsampling: %s, %s'%(len(inner), ADMIN_DICT[region_code])

	boundary_pair = []
	for i in range(len(boundary)-1):
		boundary_pair.append([boundary[i],boundary[i+1]])

	coord = boundary + inner

	if len(coord)>15000:
		print('too large...')
		return
	coord_mat = np.array(coord)

	codes = [Path.MOVETO]+[Path.LINETO]*(len(boundary)-2)+[Path.CLOSEPOLY]
	boundary_path = Path(boundary, codes)	

	tri_origin = Delaunay(coord_mat)
	tri_point = tri_origin.points
	tri_simps = tri_origin.simplices
	tri_dict = {}
	tri_centrals = []
	tri = {}
	for i,k in enumerate(tri_simps):
		f1 = boundary_path.contains_point(tuple((tri_point[k[0]]+tri_point[k[1]])/2.))
		if [list(coord[k[0]]),list(coord[k[1]])] in boundary_pair or [list(coord[k[1]]),list(coord[k[0]])] in boundary_pair:
			f1 = True
		f2 = boundary_path.contains_point(tuple((tri_point[k[1]]+tri_point[k[2]])/2.))
		if [list(coord[k[1]]),list(coord[k[2]])] in boundary_pair or [list(coord[k[2]]),list(coord[k[1]])] in boundary_pair:
			f2 = True
		f3 = boundary_path.contains_point(tuple((tri_point[k[2]]+tri_point[k[0]])/2.))
		if [list(coord[k[2]]),list(coord[k[0]])] in boundary_pair or [list(coord[k[0]]),list(coord[k[2]])] in boundary_pair:
			f3 = True
		if f1 + f2 + f3 >= 3:
			# parse central of each tri
			key = utils.tri_height([list(tri_point[k[0]]), list(tri_point[k[1]]), list(tri_point[k[2]])])
			# key = tuple(((tri_point[k[0]][0]+tri_point[k[1]][0]+tri_point[k[2]][0])/3.,(tri_point[k[0]][1]+tri_point[k[1]][1]+tri_point[k[2]][1])/3.))
			tri_centrals.append(key)
			tri[tuple(key)] = [list(tri_point[k[0]]), list(tri_point[k[1]]), list(tri_point[k[2]])]

	part_num = partition
	# part_num = int(len(tri_centrals)/4)

	# if cluster_flag == "kmeans":
	# 	clutser_centers = KMeans(n_clusters=part_num, algorithm='auto').fit(tri_centrals)
	# elif cluster_flag == 'batchkmeans':
	# 	clutser_centers = MiniBatchKMeans(n_clusters=part_num).fit(tri_centrals)
	# elif cluster_flag == 'agglomanhattan':
	# 	clutser_centers = AgglomerativeClustering(n_clusters=part_num, linkage='complete', affinity='manhattan').fit(tri_centrals)
	# else:
	# 	clutser_centers = AgglomerativeClustering(n_clusters=part_num, linkage='complete', affinity='euclidean').fit(tri_centrals)

	if part_num > 30:
		cluster_centers = AgglomerativeClustering(n_clusters=part_num, linkage='complete', affinity='euclidean').fit(tri_centrals)
	else:
		cluster_centers = KMeans(n_clusters=part_num, algorithm='auto').fit(tri_centrals)

	tri_cent_labels = cluster_centers.labels_

	features = []
	for i in range(part_num):
		features.append([])
	for i in range(len(tri_cent_labels)):
		features[tri_cent_labels[i]].append(tri[tuple(tri_centrals[i])])

	edges = []
	for polygons in features:
		edges.append(utils.parse_edges(polygons))
		# edges.append(polygons)

	# edges = clustering.poly_cluster(edges, partition)

	return edges
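
Here `utils.upsample(inner, 0.001)` appears to densify the inner road/river/rail polylines so that consecutive coordinates are no more than roughly 0.001 degrees apart before the Delaunay triangulation. A minimal sketch under that assumption (the name upsample_polyline and the exact semantics of the threshold are guesses from the call site):

import numpy as np

def upsample_polyline(coords, max_gap):
    # Insert linearly interpolated points wherever two consecutive
    # coordinates are further apart than max_gap.
    dense = []
    for (x0, y0), (x1, y1) in zip(coords[:-1], coords[1:]):
        n = max(int(np.ceil(np.hypot(x1 - x0, y1 - y0) / max_gap)), 1)
        for t in np.linspace(0., 1., n, endpoint=False):
            dense.append([x0 + t * (x1 - x0), y0 + t * (y1 - y0)])
    dense.append(list(coords[-1]))
    return dense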