def gabor(im, W, angles):
    """Apply block-wise Gabor filtering to a grayscale PIL image.

    The image is tiled into W x W blocks; for each interior block a Gabor
    kernel is built from the local ridge orientation ``angles[i][j]`` and
    local ridge frequency, then applied pixel-by-pixel.  Results are
    accumulated in a separate numpy buffer so filtering never reads
    already-filtered pixels.

    Args:
        im: grayscale PIL image (mode 'L' expected for the final fromarray).
        W: block size in pixels.
        angles: 2-D grid of per-block ridge orientations.

    Returns:
        A new PIL Image ('L') containing the filtered result.
    """
    (x, y) = im.size
    im_load = im.load()
    # getdata() yields pixels row-major (one row of `x` pixels per image row),
    # so the 2-D buffer must be (height, width) = (size[1], size[0]).
    # The original reshape(size[0], size[1]) silently transposed the layout
    # for non-square images.
    ymage = np.asarray(im.getdata(), dtype=np.float64).reshape(
        im.size[1], im.size[0])
    freqs = frequency.freq(im, W, angles)
    print("computing local ridge frequency done")
    gauss = utils.gauss_kernel(3)
    # NOTE(review): frequency smoothing is deliberately disabled in this
    # variant (the sibling in-place variant enables it) — confirm which is
    # intended before re-enabling:
    # utils.apply_kernel(freqs, gauss)
    # `//` keeps integer semantics under both Python 2 and 3.
    for i in range(1, x // W - 1):
        for j in range(1, y // W - 1):
            kernel = gabor_kernel(W, angles[i][j], freqs[i][j])
            for k in range(0, W):
                for l in range(0, W):
                    # Lambda params renamed so they no longer shadow the
                    # image dimensions x, y above.
                    val = utils.apply_kernel_at(
                        lambda px, py: im_load[px, py],
                        kernel,
                        i * W + k, j * W + l)
                    # PIL access is [x, y]; numpy is [row, col] = [y, x].
                    ymage[j * W + l][i * W + k] = val
    # fromarray with mode='L' requires 8-bit data; clip the float64 buffer
    # into range before converting.
    ymage = np.clip(ymage, 0, 255).astype(np.uint8)
    return Image.fromarray(ymage, mode='L')
def gabor(im, W, angles):
    """Apply block-wise Gabor filtering to a grayscale PIL image in place.

    Same tiling scheme as the buffered variant: each interior W x W block
    gets a Gabor kernel from its local orientation and ridge frequency.
    Filtered values are collected first and written back to the image only
    after the full sweep, then the (mutated) input image is returned.

    Args:
        im: grayscale PIL image; its pixel data is overwritten.
        W: block size in pixels.
        angles: 2-D grid of per-block ridge orientations.

    Returns:
        The same PIL image object, with interior blocks filtered.

    NOTE(review): this duplicates the name of the buffered `gabor` above;
    the later definition wins at import time — confirm which one is meant
    to be live.
    """
    (x, y) = im.size
    im_load = im.load()
    freqs = frequency.freq(im, W, angles)
    print("computing local ridge frequency done")
    gauss = utils.gauss_kernel(3)
    # Smooth the local-frequency field before building kernels.
    utils.apply_kernel(freqs, gauss)
    # Bug fix: the original assigned into im_load inside the loop while the
    # kernel lambda was still READING im_load, so pixels processed later were
    # computed from already-filtered neighbours.  Buffer every result and
    # write back only after the whole sweep.
    filtered = {}
    # `//` keeps integer semantics under both Python 2 and 3.
    for i in range(1, x // W - 1):
        for j in range(1, y // W - 1):
            kernel = gabor_kernel(W, angles[i][j], freqs[i][j])
            for k in range(0, W):
                for l in range(0, W):
                    px = i * W + k
                    py = j * W + l
                    filtered[(px, py)] = utils.apply_kernel_at(
                        lambda a, b: im_load[a, b],
                        kernel, px, py)
    for (px, py), val in filtered.items():
        im_load[px, py] = val
    return im
# Loss computation for a DSLR photo-enhancement GAN (MXNet).  Combines four
# terms: texture (adversarial), content (VGG feature), color (blurred-image
# cross-entropy) and total variation.  All names here (adversarial_,
# enhanced_images, dslr_images, the *_loss ops, ctx/ctx1, batch_size,
# PATCH_WIDTH/PATCH_HEIGHT) are defined elsewhere in the training script.
adversarial_ = adversarial_.reshape(-1, 1, PATCH_HEIGHT, PATCH_WIDTH)
discrim_predictions = discrim_predictions_logits(adversarial_)
#texture loss
# Two-column target: (adv_, 1 - adv_) — one-hot real/fake labels along dim 1.
discrim_target = mx.nd.concat(adv_, 1 - adv_, dim=1)
loss_discrim = texture_cross_entropy(discrim_predictions, discrim_target)
# Generator maximizes discriminator loss, hence the sign flip.
loss_texture = -1 * loss_discrim
#content loss
# L2 distance between VGG-19 relu5_4 features of enhanced and DSLR patches,
# normalized by the feature-map volume (6*6*512) and the batch size.
enhanced_vgg = vgg19_relu5_4(preprocess_vgginput(enhanced_images.as_in_context(ctx1)))
dslr_vgg = vgg19_relu5_4(preprocess_vgginput(dslr_images.as_in_context(ctx1)))
loss_content = 2 * content_l2loss(enhanced_vgg, dslr_vgg) / (6*6*512*batch_size)
# loss color
# 21x21 Gaussian blur kernel; transpose to MXNet conv weight layout before use.
kernel_var = utils.gauss_kernel(21, 3, 3)
kernel_var = mx.nd.transpose(mx.nd.array(kernel_var), (2, 3, 0, 1))
# enhanced_images_blur = mx.symbol.Convolution(data=enhanced_images, weight=kernel_var, num_group=3)
enhanced_images_blur = blur_op(enhanced_images, kernel_var.as_in_context(ctx))
# NOTE(review): "dlsr" looks like a typo for "dslr" — harmless as long as it
# is used consistently, as it is below.
dlsr_images_blur = blur_op(dslr_images.as_in_context(ctx), kernel_var.as_in_context(ctx))
loss_color = color_cross_entropy(dlsr_images_blur, enhanced_images_blur, batch_size)
#total variation loss
batch_shape = (batch_size, 3, PATCH_WIDTH, PATCH_HEIGHT)
#TODO: need get size from shape. See tf version
# Hard-coded TV normalizers; presumably batch*3*(dim-1)*dim for square
# patches — verify against the TF original before changing patch size.
tv_y_size = 29700
tv_x_size = 29700
# NOTE(review): batch_shape[2] is used to crop BOTH axis 3 (x shift) and
# axis 2 (y shift); correct only when PATCH_WIDTH == PATCH_HEIGHT — confirm.
loss_tvx = tvx_l2loss(enhanced_images[:,:,:,1:], enhanced_images[:,:,:,:batch_shape[2]-1])
loss_tvy = tvy_l2loss(enhanced_images[:, :, 1:, :], enhanced_images[:, :, :batch_shape[2] - 1, :])
loss_tv = 2 * (loss_tvx/tv_x_size + loss_tvy/tv_y_size) / batch_size