Example #1
def compute_tcoherence(images):
    grad_images = []
    contributors = []
    coherences = []
    spatial_coherence = []
    # width = len(images[0][0])
    # height = len(images[0])
    rows, cols, ch = np.shape(images[0])
    print(rows, cols)
    print("Computing gradients of {} images".format(len(images)))
    for image in images:
        grad_images.append(utils.gradient(image))
    print "Computed gradients of {} images".format(len(grad_images))
    first_frame = remove(images[0],grad_images[0])
    contributors.append(first_frame)
    coherences.append(grad_images[0])
    spatial_coherence.append([])
    for i in range(1,len(images)):
        coherences.append([])
        spatial_coherence.append([])
        rc = mark(contributors[i - 1], images[i])
        rc_grad = utils.gradient(rc)
        for x in range(rows):
            coherences[i].append([])
            for y in range(cols):
                print "Computinf energy for {},{} of image number {}".format(x,y,i)
                coherences[i][x].append(total(x,y,grad_images[i],rc_grad,rows))
        print(np.shape(coherences[i]))
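Example #1 (continued as Example #29 below) leans on a `utils.gradient` helper that is not shown on this page. A minimal sketch of such a helper, assuming a Sobel gradient-magnitude definition; the actual utility in the source repository may differ:

import cv2
import numpy as np

def gradient(image):
    # Hypothetical stand-in for utils.gradient: Sobel gradient magnitude
    # of a (possibly color) image.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if image.ndim == 3 else image
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    return np.sqrt(gx ** 2 + gy ** 2)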
Example #2
    def build_model(self):
        with tf.name_scope('input'):
            # Infrared image patch
            self.ir_images = tf.compat.v1.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='ir_images')
            self.vi_images = tf.compat.v1.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='vi_images')
            self.ir_mask = tf.compat.v1.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='ir_mask')

        with tf.name_scope('Fusion'):
            self.fusion_images = STDFusion_net.STDFusion_model(
                self.vi_images, self.ir_images)
        with tf.name_scope("learn_rate"):
            self.lr = tf.compat.v1.placeholder(tf.float32, name='lr')

        with tf.name_scope('g_loss'):
            self.ir_mask = (self.ir_mask + 1) / 2.0
            self.ir_p_loss_train = tf.multiply(
                self.ir_mask, tf.abs(self.fusion_images - self.ir_images))
            self.vi_p_loss_train = tf.multiply(
                1 - self.ir_mask, tf.abs(self.fusion_images - self.vi_images))
            self.ir_grad_loss_train = tf.multiply(
                self.ir_mask,
                tf.abs(
                    gradient(self.fusion_images) - gradient(self.ir_images)))
            self.vi_grad_loss_train = tf.multiply(
                1 - self.ir_mask,
                tf.abs(
                    gradient(self.fusion_images) - gradient(self.vi_images)))

            self.ir_p_loss = tf.reduce_mean(self.ir_p_loss_train)
            self.vi_p_loss = tf.reduce_mean(self.vi_p_loss_train)
            self.ir_grad_loss = tf.reduce_mean(self.ir_grad_loss_train)
            self.vi_grad_loss = tf.reduce_mean(self.vi_grad_loss_train)
            self.g_loss_2 = (1 * self.vi_p_loss + 1 * self.vi_grad_loss +
                             7 * self.ir_p_loss + 7 * self.ir_grad_loss)

            # tf.compat.v1.summary.scalar logs a scalar; here it records the loss
            tf.compat.v1.summary.scalar('g_loss_2', self.g_loss_2)
            self.g_loss_total = 1 * self.g_loss_2
            # display total_loss
            tf.compat.v1.summary.scalar('loss_g', self.g_loss_total)

        self.saver = tf.compat.v1.train.Saver(max_to_keep=50)

        with tf.name_scope('image'):
            tf.compat.v1.summary.image(
                'vi_image', tf.expand_dims(self.vi_images[1, :, :, :], 0))
            tf.compat.v1.summary.image(
                'ir_image', tf.expand_dims(self.ir_images[1, :, :, :], 0))
            tf.compat.v1.summary.image(
                'fusion_images',
                tf.expand_dims(self.fusion_images[1, :, :, :], 0))
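Several TensorFlow examples on this page (Examples #2, #5, #22, #25, #32, #34) call a free function `gradient` on image batches whose definition is not listed. In FusionGAN-style fusion code this is commonly a fixed 3x3 Laplacian filter; a sketch under that assumption:

import tensorflow as tf

def gradient(x):
    # Assumed definition: 3x3 Laplacian filter over [N, H, W, 1] tensors,
    # as commonly used in image-fusion gradient losses.
    kernel = tf.constant([[0., 1., 0.],
                          [1., -4., 1.],
                          [0., 1., 0.]])
    kernel = tf.reshape(kernel, [3, 3, 1, 1])
    return tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME')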
Example #3
 def gen_loss_func(self, fusion_output, fusion_img, vis_img, inf_img):
     x1 = fusion_output - torch.Tensor(fusion_output.shape).uniform_(
         0.7, 1.2).to(device)
     x2 = fusion_img - inf_img
     x3 = utils.gradient(fusion_img) - utils.gradient(vis_img)
     gan_loss = torch.mean(torch.mul(x1, x1))
     content_loss = torch.mean(torch.mul(x2,x2)) + \
                    self.epsilon * torch.mean(torch.mul(x3,x3))
     return gan_loss + self.lda * content_loss, gan_loss, self.lda * content_loss
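Example #3 is PyTorch; its `utils.gradient` is likewise not shown. A sketch assuming the same Laplacian-filter convention, for tensors of shape [N, 1, H, W]:

import torch
import torch.nn.functional as F

def gradient(x):
    # Hypothetical stand-in for utils.gradient: 3x3 Laplacian filter.
    kernel = torch.tensor([[0., 1., 0.],
                           [1., -4., 1.],
                           [0., 1., 0.]], device=x.device).view(1, 1, 3, 3)
    return F.conv2d(x, kernel, padding=1)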
Example #4
        def gradient_loss():
            masked_gt = mask_true * gt
            masked_pred = mask_true * pred

            # compute gradient on x and y axis
            gt_grads_x, gt_grads_y = utils.gradient(masked_gt)
            pred_grads_x, pred_grads_y = utils.gradient(masked_pred)

            diff_x = gt_grads_x - pred_grads_x
            diff_y = gt_grads_y - pred_grads_y

            return tf.reduce_mean(tf.abs(diff_x)) + tf.reduce_mean(tf.abs(diff_y))
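Example #4 unpacks two return values, so its `utils.gradient` must yield per-axis gradients. One way to get that behavior from a stock TensorFlow op (an assumption, not necessarily the repo's actual helper) is `tf.image.image_gradients`, which returns (dy, dx):

import tensorflow as tf

def gradient(images):
    # Hypothetical per-axis variant: reorder tf.image.image_gradients'
    # (dy, dx) output to the (grads_x, grads_y) order used above.
    dy, dx = tf.image.image_gradients(images)
    return dx, dy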
Example #5
    def build_model(self):
        with tf.name_scope('IR_input'):
            self.images_ir = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_ir')
            self.labels_ir = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_ir')
        with tf.name_scope('VI_input'):
            self.images_vi = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_vi')
            self.labels_vi = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_vi')

        with tf.name_scope('input'):
            self.input_image_ir = tf.concat(
                [self.labels_ir, self.labels_ir, self.labels_vi], axis=-1)
            self.input_image_vi = tf.concat(
                [self.labels_vi, self.labels_vi, self.labels_ir], axis=-1)

        with tf.name_scope('fusion'):
            self.fusion_image = self.fusion_model(self.input_image_ir,
                                                  self.input_image_vi)

        with tf.name_scope('g_loss'):
            self.g_loss_2 = tf.reduce_mean(
                tf.square(self.fusion_image - self.labels_ir)
            ) + 2 * tf.reduce_mean(
                tf.square(
                    gradient(self.fusion_image) - gradient(self.labels_vi)))
            tf.summary.scalar('g_loss_2', self.g_loss_2)
            self.g_loss_total = 100 * self.g_loss_2
            tf.summary.scalar('loss_g', self.g_loss_total)
        self.saver = tf.train.Saver(max_to_keep=50)

        with tf.name_scope('image'):
            tf.summary.image('input_ir',
                             tf.expand_dims(self.labels_ir[1, :, :, :], 0))
            tf.summary.image('input_vi',
                             tf.expand_dims(self.labels_vi[1, :, :, :], 0))
            tf.summary.image('fusion_image',
                             tf.expand_dims(self.fusion_image[1, :, :, :], 0))
Example #6
 def __init__(self, *args, **kwargs):
     super(ClassicNewton, self).__init__(*args, **kwargs)
     hess = kwargs.get("hess", None)
     if hess is None:
         self.hess = ut.gradient(self.grad)
     else:
         self.hess = hess
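In Example #6 (and again in Example #9 below), `ut.gradient` is an operator on functions: applied to `self.grad` it yields a Hessian. A sketch assuming a central-difference implementation; the step size and names are illustrative:

import numpy as np

def gradient(f, eps=1e-6):
    # Hypothetical sketch of ut.gradient: return a function computing
    # the central-difference derivative of f. Applied to a gradient
    # function, the result is a numerical Hessian.
    def df(x):
        x = np.asarray(x, dtype=float)
        cols = []
        for i in range(x.size):
            e = np.zeros_like(x)
            e.flat[i] = eps
            cols.append((np.asarray(f(x + e)) - np.asarray(f(x - e))) / (2 * eps))
        return np.stack(cols, axis=-1)
    return df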
Example #7
def MMF_sliding_window(user_rating, train, Itrain, k, win_size=5):

    users = np.random.rand(user_rating.shape[0], 5)
    movies = np.random.randint(4, size=(user_rating.shape[1], 5))

    errors = []
    window_error = []

    mean1 = 0
    mean2 = 0
    for i in range(k):
        result = np.multiply((train - users.dot(movies.T)), Itrain)
        users, movies = gradient(users, movies, result, 0.000001)
        error = np.sum(np.square(result))
        errors.append(error)

        if i != 0 and i % win_size == 0:

            if i == win_size:
                mean1 = np.mean(window_error)
            else:
                mean2 = np.mean(window_error)
                if mean2 < mean1:
                    break
                mean1 = mean2
            window_error = []
        else:
            window_error.append(error)

    return errors, users, movies
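Example #7 and the related MMF routines below (Examples #13 and #15) share a `gradient(users, movies, result, lr)` step whose definition is not listed. A sketch assuming one plain gradient-descent update on the masked squared error, where `result` is the masked residual:

import numpy as np

def gradient(users, movies, result, lr):
    # One descent step on ||(train - users @ movies.T) * Itrain||^2;
    # since result is the masked residual, dLoss/dU = -2 * result @ V.
    users_new = users + 2 * lr * result.dot(movies)
    movies_new = movies + 2 * lr * result.T.dot(users)
    return users_new, movies_new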
Example #8
    def backward_G(self):
        img = self.img
        img_re = self.img_re
        img_g = gradient(img)
        self.img_down = self.downsample(img)
        self.img_g = img_g
        # print(self.g1.sum(),self.g2.sum(),self.g3.sum(),img_g.sum())
        # print(self.g1.mean(), self.g2.mean(), self.g3.mean(), img_g.mean())
        g1 = self.MSE_fun(self.g1, img_g)
        g2 = self.MSE_fun(self.g2, img_g)
        g3 = self.MSE_fun(self.g3, img_g)
        grd_loss = g1 + g2 + g3
        self.lossg1, self.lossg2, self.lossg3 = g1, g2, g3
        # grd_loss = self.MSE_fun(self.g1, img_g) + self.MSE_fun(self.g2, img_g) + self.MSE_fun(self.g3, img_g)
        ssim_loss = 1 - self.SSIM_fun(img_re, img)
        ssim_loss = ssim_loss * 10
        pixel_loss = self.MSE_fun(img_re, img)
        pixel_loss = pixel_loss * 100

        loss_G = self.mulGANloss(self.D(self.s), is_real=True) * 0.1

        # sum the losses and backpropagate
        loss = pixel_loss + ssim_loss + grd_loss + loss_G

        loss.backward()
        self.loss, self.pixel_loss, self.ssim_loss, self.grd_loss = loss, pixel_loss, ssim_loss, grd_loss
        self.loss_G = loss_G
Example #9
    def __init__(self, f, x0, use_exact_ls=True, thres=1e-7, grad=None, max_iter=1000):
        self.f = f
        self.x0 = x0
        self.use_exact_ls = use_exact_ls
        self.thres = thres
        self.max_iter = max_iter

        if grad is None:
            self.grad = ut.gradient(f)
        else:
            self.grad = grad
Example #10
        def get_gradient_color(percentage):

            percentage = 50 + int(percentage) / 2
            return gradient(
                Color.red(),
                Color.magenta(),
                Color.lighter_grey(),
                Color.teal(),
                Color.green(),
                percentage=percentage,
            )
Example #11
    def saveimgdemo(self):
        self.img_down = self.downsample(self.img)
        self.img_g = gradient(self.img)

        img = torchvision.utils.make_grid([
            self.img[0].cpu(), self.img_re[0].cpu(), self.img_down[0].cpu(),
            self.img_g[0].cpu(), self.s[0].cpu(),
            self.g1[0].cpu(), self.g2[0].cpu(), self.g3[0].cpu(),
            (self.g1 + self.g2 + self.g3)[0].cpu()
        ],
                                          nrow=5)
        torchvision.utils.save_image(img, fp=(os.path.join('demo_result.jpg')))
Example #12
    def saveimgfuse(self, name=''):
        self.img_down = self.downsample(self.img)
        self.img_g = gradient(self.img)

        img = torchvision.utils.make_grid([
            self.img[0].cpu(), self.img_g[0].cpu(),
            ((self.g1 + self.g2 + self.g3) * 1.5)[0].cpu()
        ],
                                          nrow=3)
        torchvision.utils.save_image(img,
                                     fp=(os.path.join(
                                         name.replace('Test', 'demo'))))
Example #13
def MMF(user_rating, train, Itrain, k):

    users = np.random.rand(user_rating.shape[0], 5)
    movies = np.random.randint(4, size=(user_rating.shape[1], 5))

    error = []
    for i in range(k):
        result = np.multiply((train - users.dot(movies.T)), Itrain)
        users, movies = gradient(users, movies, result, 0.000001)
        error.append(np.sum(np.square(result)))

    return error, users, movies
Example #14
 def render(this, dest, destpos, viewpos):
     if (not this.topSurf or 
         dest.get_width() != this.topSurf.get_width()):
         # Render the top and bottom curtains
         this.topSurf = utils.gradient(
             this.startColor, this.endColor, 
             (dest.get_width(), this.size))
         this.btmSurf = pygame.transform.flip(this.topSurf, False, True)
     # Now render the creepy top and bottom 'shadow'
     dest.blit(this.topSurf, (0, destpos[1]))
     dest.blit(
         this.btmSurf, 
         (0, destpos[1]+viewpos.h-this.size))
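The two pygame `render` methods (Example #14 above and Example #16 below) use `utils.gradient(start, end, size)` to build a vertically fading Surface. A sketch assuming per-row linear RGB interpolation; the real helper may differ:

import pygame

def gradient(start_color, end_color, size):
    # Hypothetical stand-in: a Surface fading from start_color at the
    # top to end_color at the bottom.
    width, height = size
    surf = pygame.Surface(size)
    for y in range(height):
        t = y / max(height - 1, 1)
        color = [int(a + (b - a) * t) for a, b in zip(start_color, end_color)]
        pygame.draw.line(surf, color, (0, y), (width, y))
    return surf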
Example #15
def MMF_line_search(user_rating, train, Itrain, k, threshold=0.001):

    users = np.random.rand(user_rating.shape[0], 5)
    movies = np.random.randint(4, size=(user_rating.shape[1], 5))

    error = []
    alpha = 1 / 9000
    cost_old = 0
    cost_new = 0

    for i in range(k):
        result = np.multiply((train - users.dot(movies.T)), Itrain)
        users, movies = gradient(users, movies, result, alpha)
        cost_new = np.sum(np.square(result))
        error.append(cost_new)

        if abs(cost_old - cost_new) > threshold:
            alpha = alpha / 10
            users, movies = gradient(users, movies, result, alpha)
        else:
            break
        cost_old = cost_new

    return error, users, movies
Example #16
 def render(this, dest, destpos, viewpos):
     if (not this.gradient and this.gradientSize != -1):
         # Initialise the sky gradient now
         this.gradient = utils.gradient(
             this.gradientStartColor, this.gradientEndColor, 
             (dest.get_width(), this.gradientSize))
     # Figure out where to render the backdrop on the screen
     (x, y) = (this.center[0]-viewpos.x*this.speed, 
               this.center[1]-viewpos.y*this.speed)
     # Render the sky first, then the backdrop over top, then finally
     # the ground under it.
     r = dest.blit(this.gradient, (0, y+this.backdrop.get_height()))
     dest.fill(this.gradientEndColor, 
               (0, r.bottom, dest.get_width(),
                dest.get_height()-r.bottom))
     dest.blit(this.backdrop, (x, y))
Example #17
    def get_shrunk_channels(self, src):
        shrink = self.options["shrink"]
        n_orient = self.options["n_orient"]
        grd_smooth_rad = self.options["grd_smooth_rad"]
        grd_norm_rad = self.options["grd_norm_rad"]

        luv = rgb2luv(src)
        size = (luv.shape[0] / shrink, luv.shape[1] / shrink)
        channels = [resize(luv, size)]

        for scale in [1.0, 0.5]:
            img = resize(luv, (luv.shape[0] * scale, luv.shape[1] * scale))
            img = conv_tri(img, grd_smooth_rad)

            magnitude, orientation = gradient(img, grd_norm_rad)

            downscale = max(1, int(shrink * scale))
            hist = histogram(magnitude, orientation, downscale, n_orient)

            channels.append(resize(magnitude, size)[:, :, None])
            channels.append(resize(hist, size))

        channels = N.concatenate(channels, axis=2)

        reg_smooth_rad = self.options["reg_smooth_rad"] / float(shrink)
        ss_smooth_rad = self.options["ss_smooth_rad"] / float(shrink)

        if reg_smooth_rad > 1.0:
            reg_ch = conv_tri(channels, int(round(reg_smooth_rad)))
        else:
            reg_ch = conv_tri(channels, reg_smooth_rad)

        if ss_smooth_rad > 1.0:
            ss_ch = conv_tri(channels, int(round(ss_smooth_rad)))
        else:
            ss_ch = conv_tri(channels, ss_smooth_rad)

        return reg_ch, ss_ch
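Example #17's `gradient(img, grd_norm_rad)` returns a magnitude/orientation pair, as in structured-edge-detection features. A sketch using NumPy finite differences; the normalization radius is ignored here, which is a simplifying assumption:

import numpy as np

def gradient(img, norm_rad=0):
    # Per-pixel gradient magnitude and orientation; for multi-channel
    # input, keep the channel with the largest magnitude. norm_rad
    # (gradient normalization) is omitted in this sketch.
    dy, dx = np.gradient(img, axis=(0, 1))
    mag = np.sqrt(dx ** 2 + dy ** 2)
    if img.ndim == 3:
        idx = mag.argmax(axis=2)
        rows, cols = np.indices(idx.shape)
        dx, dy, mag = dx[rows, cols, idx], dy[rows, cols, idx], mag[rows, cols, idx]
    orientation = np.mod(np.arctan2(dy, dx), np.pi)
    return mag, orientation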
Example #18
def traceContour(imgInitiale, imgNettoyee):

    # Apply a gradient using a gamma8 structuring element
    structElement = strel.build('diamant', 1, 0)
    imageNiveauGris = cv2.cvtColor(imgNettoyee, cv2.COLOR_BGR2GRAY)
    imgAvecGradient = utils.gradient(imageNiveauGris, structElement)

    # Then apply a threshold
    seuil = 1
    imgSeuilee = utils.appliquerSeuil(imgAvecGradient, seuil)

    # Rebuild a color image from the grayscale image
    imgContour = imgInitiale
    index = imgSeuilee[:] == 255
    imgContour[:] = (0, 0, 0)
    imgContour[index] = (255, 255, 255)

    elementOuverture = strel.build('disque', 1, None)
    elementReconstruction = strel.build('carre', 1, None)

    imgContour = utils.ouvertureReconstruction(imgContour, elementOuverture, elementReconstruction)
    imgContour = utils.fermetureReconstruction(imgContour, elementOuverture, elementReconstruction)

    return imgContour
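Example #18 applies `utils.gradient` with a structuring element, i.e. a morphological gradient. A sketch using OpenCV; the repo's own helper may be hand-rolled:

import cv2

def gradient(gray_img, struct_element):
    # Morphological gradient: dilation minus erosion with the given
    # structuring element.
    return cv2.morphologyEx(gray_img, cv2.MORPH_GRADIENT, struct_element)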
Example #19
def delta_theta(inputs, labels, query, theta, tau, lamb):
    weights = create_weights(inputs, query, tau)
    inv_hessian = inverse_hessian(inputs, query, theta, weights, lamb)
    grad = gradient(inputs, labels, query, theta, weights, lamb)
    return np.matmul(inv_hessian, grad)
Example #20
def process(args):
    starttime = time.time()
    file = args.train
    file1 = args.train1
    link_file = args.link
    alpha = args.alpha
    window = args.window
    negative = args.negative
    iteration = args.iter
    output_file = args.output
    output_file1 = args.output1
    save_vocab_file = args.save_vacab
    layer1_size = args.size
    sample = args.sample
    min_count = args.mini_count

    sentences = read_file(file)
    sentences1 = read_file(file1)

    iter = 60
    link, link1 = build_link(link_file)
    model = Word2Vec(sentences, size=layer1_size, alpha=alpha, window=window, min_count=min_count,
                     max_vocab_size=100000, sample=1e-3, seed=1,
                     negative=negative, hashfxn=hash, iter=iteration, null_word=0,
                     trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH)
    model1 = Word2Vec(sentences1, size=layer1_size, alpha=alpha, window=window, min_count=min_count,
                      max_vocab_size=100000, sample=1e-3, seed=1,
                      negative=negative, hashfxn=hash, iter=iteration, null_word=0,
                      trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH)
    gpar = 0
    z = 0
    gpar1 = 0
    z1 = 0
    G = 60
    G1 = 61
    t = 0
    t1 = 0

    syn01, gpar1, z1, t1 = model1.train(model_=model, sentences=sentences1, Gpar=gpar1, G=G1, link=link1, Z=z,
                                        epochs=iter,
                                        word_num=t1, start_alpha=alpha)
    syn0, gpar, z, t = model.train(model_=model1, sentences=sentences, Gpar=gpar, Z=z, G=G, link=link, epochs=iter,
                                   word_num=t,
                                   start_alpha=alpha)

    for n in range(1, iteration):
        syn0, gpar, map0, z, t = model.train(model_=model1, sentences=sentences, Gpar=gpar, Z=z, G=G, link=link,
                                             epochs=iter, word_num=t, start_alpha=alpha)
        syn01, gpar1, map1, z1, t1 = model1.train(model_=model, sentences=sentences1, Gpar=gpar1, G=G1, link=link1,
                                                  Z=z1, epochs=iter, word_num=t1, start_alpha=alpha)

    index = model.wv.index2word
    index1 = model1.wv.index2word
    gradient(syn0, index, gpar, G, 0)
    gradient(syn01, index1, gpar1, G1, 1)

    fp = open(output_file, 'w')
    fp.write(str(len(model.wv.index2word)) + "  " + str(model.layer1_size) + '\n')
    for a in range(len(model.wv.index2word)):
        fp.write(str(model.wv.index2word[a]) + '\t')
        for b in range(0, model.layer1_size):
            fp.write(str(model.wv.syn0[a][b]) + "      	")
        fp.write('\n')
    fp.close()

    fp = open(output_file1, 'w')
    fp.write(str(len(model1.wv.index2word)) + "  " + str(model1.layer1_size) + '\n')
    for a in range(len(model1.wv.index2word)):
        fp.write(str(model1.wv.index2word[a]) + '\t')
        for b in range(0, model1.layer1_size):
            fp.write(str(model1.wv.syn0[a][b]) + "      	")
        fp.write('\n')
    fp.close()
    endtime = time.time()
    dtime = endtime - starttime
    print("Program running time:%.8s s" % dtime)
Example #21
    def merge_trees(self):
        """
        Accumulate trees and merge into final model
        """

        n_tree = self.options["n_tree"]
        g_size = self.options["g_size"]

        if not os.path.exists(self.forest_dir):
            os.makedirs(self.forest_dir)

        forest_path = os.path.join(self.forest_dir, self.forest_name)
        if os.path.exists(forest_path):
            print "Found model, reusing..."
            return

        trees = []
        for i in xrange(n_tree):
            tree_file = self.tree_prefix + str(i + 1) + ".h5"
            tree_path = os.path.join(self.tree_dir, tree_file)

            with tables.open_file(tree_path, filters=self.comp_filt) as mfile:
                tree = {"fids": mfile.get_node("/fids")[:],
                        "thrs": mfile.get_node("/thrs")[:],
                        "cids": mfile.get_node("/cids")[:],
                        "segs": mfile.get_node("/segs")[:]}
            trees.append(tree)

        max_n_node = 0
        for i in xrange(n_tree):
            max_n_node = max(max_n_node, trees[i]["fids"].shape[0])

        # merge all fields of all trees
        thrs = N.zeros((n_tree, max_n_node), dtype=N.float64)
        fids = N.zeros((n_tree, max_n_node), dtype=N.int32)
        cids = N.zeros((n_tree, max_n_node), dtype=N.int32)
        segs = N.zeros((n_tree, max_n_node, g_size, g_size), dtype=N.int32)
        for i in xrange(n_tree):
            tree = trees[i]
            n_node = tree["fids"].shape[0]
            thrs[i, :n_node] = tree["thrs"].flatten()
            fids[i, :n_node] = tree["fids"].flatten()
            cids[i, :n_node] = tree["cids"].flatten()
            segs[i, :n_node] = tree["segs"]

        # remove very small segments (<=5 pixels)
        n_seg = N.max(segs.reshape((n_tree, max_n_node, g_size ** 2)), axis=2) + 1
        for i in xrange(n_tree):
            for j in xrange(max_n_node):
                m = n_seg[i, j]
                if m <= 1:
                    continue

                S = segs[i, j]
                remove = False

                for k in xrange(m):
                    Sk = (S == k)
                    if N.count_nonzero(Sk) > 5:
                        continue

                    S[Sk] = N.median(S[conv_tri(Sk.astype(N.float64), 1) > 0])
                    remove = True

                if remove:
                    S = N.unique(S, return_inverse=True)[1]
                    segs[i, j] = S.reshape((g_size, g_size))
                    n_seg[i, j] = N.max(S) + 1

        # store compact representations of sparse binary edge patches
        n_bnd = self.options["sharpen"] + 1
        edge_pts = []
        edge_bnds = N.zeros((n_tree, max_n_node, n_bnd), dtype=N.int32)
        for i in xrange(n_tree):
            for j in xrange(max_n_node):
                if cids[i, j] != 0 or n_seg[i, j] <= 1:
                    continue

                E = gradient(segs[i, j].astype(N.float64))[0] > 0.01
                E0 = 0

                for k in xrange(n_bnd):
                    r, c = N.nonzero(E & (~ E0))
                    edge_pts += [r[m] * g_size + c[m] for m in xrange(len(r))]
                    edge_bnds[i, j, k] = len(r)

                    E0 = E
                    E = conv_tri(E.astype(N.float64), 1) > 0.01

        segs = segs.reshape((-1, segs.shape[-2], segs.shape[-1]))
        edge_pts = N.asarray(edge_pts, dtype=N.int32)
        edge_bnds = N.hstack(([0], N.cumsum(edge_bnds.flatten()))).astype(N.int32)

        with tables.open_file(forest_path, "w", filters=self.comp_filt) as mfile:
            mfile.create_carray("/", "thrs", obj=thrs)
            mfile.create_carray("/", "fids", obj=fids)
            mfile.create_carray("/", "cids", obj=cids)
            mfile.create_carray("/", "edge_bnds", obj=edge_bnds)
            mfile.create_carray("/", "edge_pts", obj=edge_pts)
            mfile.create_carray("/", "n_seg", obj=n_seg)
            mfile.create_carray("/", "segs", obj=segs)
Example #22
    def build_model(self):
        with tf.name_scope('IR_input'):
            # infrared image patch
            self.images_ir = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_ir')
            self.labels_ir = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_ir')
        with tf.name_scope('VI_input'):
            # visible image patch
            self.images_vi = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_vi')
            self.labels_vi = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_vi')
            #self.labels_vi_gradient=gradient(self.labels_vi)
        # Concatenate the infrared and visible images along the channel axis; the first channel is infrared, the second is visible
        with tf.name_scope('input'):
            #self.resize_ir=tf.image.resize_images(self.images_ir, (self.image_size, self.image_size), method=2)
            self.input_image_ir = tf.concat(
                [self.labels_ir, self.labels_ir, self.labels_vi], axis=-1)
            self.input_image_vi = tf.concat(
                [self.labels_vi, self.labels_vi, self.labels_ir], axis=-1)
        #self.pred=tf.clip_by_value(tf.sign(self.pred_ir-self.pred_vi),0,1)
        # fused image
        with tf.name_scope('fusion'):
            self.fusion_image = self.fusion_model(self.input_image_ir,
                                                  self.input_image_vi)

        with tf.name_scope('g_loss'):
            #self.g_loss_1=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.ones_like(neg)))
            #self.g_loss_1=tf.reduce_mean(tf.square(neg-tf.ones_like(pos)))
            #self.g_loss_1=tf.reduce_mean(tf.square(neg-tf.random_uniform(shape=[self.batch_size,1],minval=0.7,maxval=1.2,dtype=tf.float32)))
            #tf.summary.scalar('g_loss_1',self.g_loss_1)
            #self.g_loss_2=tf.reduce_mean(tf.square(self.fusion_image - self.labels_ir))
            self.g_loss_2 = tf.reduce_mean(
                tf.square(self.fusion_image - self.labels_ir)
            ) + 0 * tf.reduce_mean(
                tf.square(self.fusion_image - self.labels_vi)
            ) + 10 * tf.reduce_mean(
                tf.square(
                    gradient(self.fusion_image) - gradient(self.labels_vi))
            ) + 2 * tf.reduce_mean(
                tf.square(
                    gradient(self.fusion_image) - gradient(self.labels_ir)))
            tf.summary.scalar('g_loss_2', self.g_loss_2)

            self.g_loss_total = 100 * self.g_loss_2
            tf.summary.scalar('loss_g', self.g_loss_total)
        self.saver = tf.train.Saver(max_to_keep=50)

        with tf.name_scope('image'):
            tf.summary.image('input_ir',
                             tf.expand_dims(self.labels_ir[1, :, :, :], 0))
            tf.summary.image('input_vi',
                             tf.expand_dims(self.labels_vi[1, :, :, :], 0))
            tf.summary.image('fusion_image',
                             tf.expand_dims(self.fusion_image[1, :, :, :], 0))
Example #23
def render_image(args: Namespace, msg_send: bool = False) -> Tuple[Union[None, str], dict, list, utils.Vector]:
    size = utils.Vector(args.width, args.height)

    starting_point = args.starting_point
    if starting_point is not None:
        # The user supplies 1-based coordinates; convert to 0-based indexing
        starting_point = utils.Vector(*[x - 1 for x in args.starting_point])

    color_background = tuple(args.color_background)

    nebula = Nebula(
        size,
        args.max_count,
        args.reproduce_chance,
        quadratic=args.quadratic,
        starting_point=starting_point
    )
    nebula.develop(min_percent=args.min_percent, max_percent=args.max_percent)

    colors = config_colors()

    if args.random_colors:
        colors = utils.random_colors(args.colors_number)
        gradient = utils.gradient(nebula.current_generation, colors)
    elif len(colors) > 1:
        gradient = utils.gradient(nebula.current_generation, colors)

    if args.opaque:
        for color in colors:
            color[3] = 255

    print(c.NOTIFICATION_MSG_BEFORE_RENDERING)
    sleep(1)

    image = Image.new('RGBA', (size.x, size.y))
    draw = ImageDraw.Draw(image)

    for x in range(size.x + 1):
        print(f'[{datetime.now().time()}]', 'Image drawing:', '{:.5f}'.format(x / size.x * 100) + ' %', sep='\t')
        for y in range(size.y + 1):
            if nebula.squares[x][y]:
                if len(colors) == 1:
                    max_gen = nebula.current_generation
                    gen = nebula.squares[x][y].gen

                    alpha = round((1 - gen / max_gen) * 255)
                    if args.fade_in:
                        alpha = round(gen / max_gen * 255)

                    colors[0][3] = alpha

                    draw.point([x, y], fill=tuple(colors[0]))
                else:
                    gen = nebula.squares[x][y].gen - 1
                    draw.point([x, y], fill=gradient[gen])
            else:
                draw.point([x, y], fill=color_background)

    image_name = f'{size.x}x{size.y}_{args.reproduce_chance}_{utils.generate_filename()}.png'
    image_path = None
    if args.save or msg_send:
        if args.path:
            image.save(args.path + image_name, format='PNG', optimize=True, quality=1)
            image_path = args.path + image_name
        elif msg_send:
            image.save(c.TELEGRAM_IMAGES_SAVE_PATH + c.TELERGAM_IMAGE_PREFIX + image_name, 'PNG')
            image_path = c.TELEGRAM_IMAGES_SAVE_PATH + c.TELERGAM_IMAGE_PREFIX + image_name
        else:
            image.save(image_name, 'PNG')
            image_path = image_name
    if args.dont_show_image:
        image.show()

    return image_path, vars(args), colors, nebula.starting_point
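In Example #23, `utils.gradient(n, colors)` must produce one color per generation, interpolated across the configured color stops. A sketch assuming linear interpolation over RGBA tuples:

def gradient(steps, colors):
    # Hypothetical stand-in: `steps` colors linearly interpolated
    # across the given list of RGBA color stops.
    out = []
    segments = len(colors) - 1
    for i in range(steps):
        t = i / max(steps - 1, 1) * segments
        k = min(int(t), segments - 1)
        f = t - k
        a, b = colors[k], colors[k + 1]
        out.append(tuple(round(x + (y - x) * f) for x, y in zip(a, b)))
    return out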
Example #24
    # content loss
    CONTENT_LAYER = 'relu5_4'
    phone_vgg = vgg.net(vgg_dir, vgg.preprocess(phone_image * 255))
    rec_vgg = vgg.net(vgg_dir, vgg.preprocess(rec_image * 255))

    content_size = utils._tensor_size(phone_vgg[CONTENT_LAYER]) * batch_size
    loss_content = 2 * tf.nn.l2_loss(phone_vgg[CONTENT_LAYER] - rec_vgg[CONTENT_LAYER]) / content_size

    # color loss
    enhanced_blur = tf.reshape(utils.blur(enhanced), [-1, PATCH_HEIGHT, PATCH_WIDTH, 3])
    dslr_blur = tf.reshape(utils.blur(dslr_image), [-1, PATCH_HEIGHT, PATCH_WIDTH, 3])
    loss_discrim_color, loss_color, discrim_accuracy_color = models.discriminator_loss(enhanced_blur, dslr_blur, adv_, discrim_target)

    # gradient loss
    if w_gradient != 0:
        enhanced_gradient = tf.reshape(utils.gradient(enhanced_gray), [-1, PATCH_HEIGHT, PATCH_WIDTH, 2])
        dslr_gradient = tf.reshape(utils.gradient(dslr_gray), [-1, PATCH_HEIGHT, PATCH_WIDTH, 2])
        loss_discrim_gradient, loss_gradient, discrim_accuracy_gradient = models.discriminator_loss(enhanced_gradient, dslr_gradient, adv_, discrim_target)
    else:
        loss_discrim_gradient = zero_
        loss_gradient = zero_
        discrim_accuracy_gradient = zero_

    #laplacian loss
    if w_laplacian != 0:
        enhanced_laplacian = tf.reshape(utils.laplacian(enhanced_gray), [-1, PATCH_HEIGHT, PATCH_WIDTH, 1])
        dslr_laplacian = tf.reshape(utils.laplacian(dslr_gray), [-1, PATCH_HEIGHT, PATCH_WIDTH, 1])
        loss_discrim_laplacian, loss_laplacian, discrim_accuracy_laplacian = models.discriminator_loss(enhanced_laplacian, dslr_laplacian, adv_, discrim_target)
    else:
        loss_discrim_laplacian = zero_
        loss_laplacian = zero_
Example #25
    def build_model(self):
        with tf.name_scope('IR_input'):
            self.images_ir = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_ir')
            self.labels_ir = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_ir')
        with tf.name_scope('VI_input'):

            self.images_vi = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_vi')
            self.labels_vi = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_vi')
        with tf.name_scope('Mask_input'):
            self.images_mask = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_mask')
            self.labels_mask = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_mask')

        with tf.name_scope('input'):
            self.input_image_ir = self.labels_ir
            self.input_image_vi = self.labels_vi

        with tf.name_scope('fusion'):
            self.fusion_image = self.fusion_model(self.input_image_ir,
                                                  self.input_image_vi)

        with tf.name_scope('grad_bin'):
            self.Image_vi_grad = tf.abs(gradient(self.labels_vi))
            self.Image_ir_grad = tf.abs(gradient(self.labels_ir))
            self.Image_fused_grad = tf.abs(gradient(self.fusion_image))

            # self.Image_vi_score=tf.reduce_mean(tf.square(self.Image_vi_grad))
            # self.Image_ir_score=tf.reduce_mean(tf.square(self.Image_ir_grad))
            self.Image_vi_weight = self.Image_vi_grad
            self.Image_ir_weight = self.Image_ir_grad

            # self.Image_vi_ir_grad_bin = tf.concat([self.Image_vi_grad, self.Image_ir_grad], 3)
            # self.Image_fused_grad_bin = tf.concat([self.Image_fused_grad, self.Image_fused_grad], 3)

        with tf.name_scope('image'):
            tf.summary.image('input_ir',
                             tf.expand_dims(self.labels_ir[1, :, :, :], 0))
            tf.summary.image('input_vi',
                             tf.expand_dims(self.labels_vi[1, :, :, :], 0))
            tf.summary.image('mask',
                             tf.expand_dims(self.labels_mask[1, :, :, :], 0))
            tf.summary.image('fusion_image',
                             tf.expand_dims(self.fusion_image[1, :, :, :], 0))
            tf.summary.image('Image_vi_grad',
                             tf.expand_dims(self.Image_vi_grad[1, :, :, :], 0))
            tf.summary.image('Image_ir_grad',
                             tf.expand_dims(self.Image_ir_grad[1, :, :, :], 0))
            # tf.summary.image('Image_vi_ir_grad_bin',tf.expand_dims(self.Image_vi_ir_grad_bin[1,:,:,:],0))
            # tf.summary.image('Image_fused_grad_bin',tf.expand_dims(self.Image_fused_grad_bin[1,:,:,:],0))

        with tf.name_scope('d_loss'):
            pos = self.discriminator(self.labels_mask, reuse=False)
            neg = self.discriminator(self.fusion_image,
                                     reuse=True,
                                     update_collection='NO_OPS')
            pos_loss = tf.reduce_mean(
                tf.square(pos - tf.random_uniform(shape=[self.batch_size, 1],
                                                  minval=0.7,
                                                  maxval=1.2,
                                                  dtype=tf.float32)))
            neg_loss = tf.reduce_mean(
                tf.square(neg - tf.random_uniform(shape=[self.batch_size, 1],
                                                  minval=0,
                                                  maxval=0.3,
                                                  dtype=tf.float32)))
            self.d_loss = neg_loss + pos_loss
            tf.summary.scalar('loss_d', self.d_loss)

        with tf.name_scope('g_loss'):
            self.g_loss_1 = tf.reduce_mean(
                tf.square(neg - tf.random_uniform(shape=[self.batch_size, 1],
                                                  minval=0.7,
                                                  maxval=1.2,
                                                  dtype=tf.float32)))
            tf.summary.scalar('g_loss_1', self.g_loss_1)

            self.g_loss_int =  tf.reduce_mean((self.Image_vi_weight-tf.minimum(self.Image_vi_weight,self.Image_ir_weight))*tf.square(self.fusion_image - self.labels_vi)) +\
                               tf.reduce_mean((self.Image_ir_weight-tf.minimum(self.Image_vi_weight,self.Image_ir_weight))*tf.square(self.fusion_image - self.labels_ir))

            self.g_loss_grau = tf.reduce_mean((self.Image_vi_weight-tf.minimum(self.Image_vi_weight,self.Image_ir_weight))*tf.square(gradient(self.fusion_image) - gradient(self.labels_vi))) + \
                               tf.reduce_mean((self.Image_ir_weight-tf.minimum(self.Image_vi_weight,self.Image_ir_weight)) * tf.square(gradient(self.fusion_image) - gradient(self.labels_ir)))

            self.g_loss_ssim = tf.reduce_mean(self.Image_ir_weight)*(1 - tf_ssim(self.labels_ir, self.fusion_image))+\
                               tf.reduce_mean(self.Image_vi_weight)*(1 - tf_ssim(self.labels_vi, self.fusion_image))

            self.g_loss_2 = 3 * self.g_loss_grau + 10 * self.g_loss_ssim + self.g_loss_int

            tf.summary.scalar('self.g_loss_int', self.g_loss_int)
            tf.summary.scalar('self.g_loss_grau', self.g_loss_grau)
            tf.summary.scalar('self.g_loss_ssim', self.g_loss_ssim)
            tf.summary.scalar('g_loss_2', self.g_loss_2)
            self.g_loss_total = self.g_loss_1 + self.g_loss_2
            tf.summary.scalar('loss_g', self.g_loss_total)

        self.saver = tf.train.Saver(max_to_keep=50)
Example #26
    model_preds = np.array(model_preds.tolist(), dtype=float)
    ale_pred = np.array(ale_pred.tolist(), dtype=float)
    # model_preds = model_preds.data.numpy()
    # ale_pred = ale_pred.data.numpy()
    return model_preds, ale_pred


if __name__ == '__main__':
    # args = parse_predict_args()
    file_path = f'./saved_models/pred_output/seed60_test/hidden_vector_0.npy'
    mol_fp = np.load(file_path)
    # mol_i = mol_fp[0, :]
    print(mol_fp.shape)
    grad_rmss = []
    grad_maxx = []
    # smiles = pd.read_csv('./saved_models/qm9_ens_woN/fold_0/qm9_N.csv').values[:, 0]
    smiles = pd.read_csv('./saved_models/pred_output/seed60_test/test_pred.csv').values[:, 0]
    for i, smile in zip(range(mol_fp.shape[0]), smiles):  # mol_fp.shape[0]
        mol_i = mol_fp[i, :]
        # print(mol_i[:200])
        points = make_points(mol_i)
        # print(points[0])
        pred, ale = predict_i(points)
        grad_rms, grad_max = gradient(pred)
        grad_rmss.append(grad_rms)
        grad_maxx.append(grad_max)
        print(smile, i+2, grad_rms, grad_max, pred[0, 0], ale[0, 0])  # , pred[0], ale[0]
    pd.DataFrame(np.array([list(smiles), grad_rmss, grad_maxx]).T, index=None, columns=['smiles', 'grad_rms', 'grad_max']).\
        to_csv('./saved_models/pred_output/woN_test/grad_m.csv', index=False)

Example #27
    for index in tqdms:

        if index == KASIST_num:
            img_name = "TNO"
        # img = next(imgs).to(device)
        if index < KASIST_num:
            img = next(imgs_K).to(device)
        else:
            # print(2)
            img = next(imgs_T).to(device)
        optimizer.zero_grad()

        img_re = model(img)

        mse_loss = MSE_fun(img, img_re)
        grd_loss = MSE_fun(gradient(img), gradient(img_re))
        hist_loss = hist_similar(img, img_re.detach()) * 0.001
        # std_loss = torch.abs(img_re.std() - img.std())
        std_loss = hist_loss

        # perceptual loss
        with torch.no_grad():
            x = img.detach()
        features = loss_network(x)
        features_re = loss_network(img_re)

        with torch.no_grad():
            f_x_vi1 = features[1].detach()
            f_x_vi2 = features[2].detach()
            f_x_ir3 = features[3].detach()
            f_x_ir4 = features[4].detach()
Example #28
    def merge_trees(self):
        """
        Accumulate trees and merge into final model
        """

        n_tree = self.options["n_tree"]
        g_size = self.options["g_size"]

        if not os.path.exists(self.forest_dir):
            os.makedirs(self.forest_dir)

        forest_path = os.path.join(self.forest_dir, self.forest_name)
        if os.path.exists(forest_path):
            print("Found model, reusing...")
            return

        trees = []
        for i in xrange(n_tree):
            tree_file = self.tree_prefix + str(i + 1) + ".h5"
            tree_path = os.path.join(self.tree_dir, tree_file)

            with tables.open_file(tree_path, filters=self.comp_filt) as mfile:
                tree = {"fids": mfile.get_node("/fids")[:],
                        "thrs": mfile.get_node("/thrs")[:],
                        "cids": mfile.get_node("/cids")[:],
                        "segs": mfile.get_node("/segs")[:]}
            trees.append(tree)

        max_n_node = 0
        for i in xrange(n_tree):
            max_n_node = max(max_n_node, trees[i]["fids"].shape[0])

        # merge all fields of all trees
        thrs = N.zeros((n_tree, max_n_node), dtype=N.float64)
        fids = N.zeros((n_tree, max_n_node), dtype=N.int32)
        cids = N.zeros((n_tree, max_n_node), dtype=N.int32)
        segs = N.zeros((n_tree, max_n_node, g_size, g_size), dtype=N.int32)
        for i in xrange(n_tree):
            tree = trees[i]
            n_node = tree["fids"].shape[0]
            thrs[i, :n_node] = tree["thrs"].flatten()
            fids[i, :n_node] = tree["fids"].flatten()
            cids[i, :n_node] = tree["cids"].flatten()
            segs[i, :n_node] = tree["segs"]

        # remove very small segments (<=5 pixels)
        n_seg = N.max(segs.reshape((n_tree, max_n_node, g_size ** 2)), axis=2) + 1
        for i in xrange(n_tree):
            for j in xrange(max_n_node):
                m = n_seg[i, j]
                if m <= 1:
                    continue

                S = segs[i, j]
                remove = False

                for k in xrange(m):
                    Sk = (S == k)
                    if N.count_nonzero(Sk) > 5:
                        continue

                    S[Sk] = N.median(S[conv_tri(Sk.astype(N.float64), 1) > 0])
                    remove = True

                if remove:
                    S = N.unique(S, return_inverse=True)[1]
                    segs[i, j] = S.reshape((g_size, g_size))
                    n_seg[i, j] = N.max(S) + 1

        # store compact representations of sparse binary edge patches
        n_bnd = self.options["sharpen"] + 1
        edge_pts = []
        edge_bnds = N.zeros((n_tree, max_n_node, n_bnd), dtype=N.int32)
        for i in xrange(n_tree):
            for j in xrange(max_n_node):
                if cids[i, j] != 0 or n_seg[i, j] <= 1:
                    continue

                E = gradient(segs[i, j].astype(N.float64))[0] > 0.01
                E0 = 0

                for k in xrange(n_bnd):
                    r, c = N.nonzero(E & (~ E0))
                    edge_pts += [r[m] * g_size + c[m] for m in xrange(len(r))]
                    edge_bnds[i, j, k] = len(r)

                    E0 = E
                    E = conv_tri(E.astype(N.float64), 1) > 0.01

        segs = segs.reshape((-1, segs.shape[-2], segs.shape[-1]))
        edge_pts = N.asarray(edge_pts, dtype=N.int32)
        edge_bnds = N.hstack(([0], N.cumsum(edge_bnds.flatten()))).astype(N.int32)

        with tables.open_file(forest_path, "w", filters=self.comp_filt) as mfile:
            mfile.create_carray("/", "thrs", obj=thrs)
            mfile.create_carray("/", "fids", obj=fids)
            mfile.create_carray("/", "cids", obj=cids)
            mfile.create_carray("/", "edge_bnds", obj=edge_bnds)
            mfile.create_carray("/", "edge_pts", obj=edge_pts)
            mfile.create_carray("/", "n_seg", obj=n_seg)
            mfile.create_carray("/", "segs", obj=segs)
Example #29
                coherences[i][x].append(total(x,y,grad_images[i],rc_grad,rows))
        print(np.shape(coherences[i]))




def imageInfo(img):
    return [len(img[0]), len(img)]

cap = cv2.VideoCapture("./videos/pit.mkv")
frameCTR = 0
images = []
numberOfFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
while True:
    ret, frame = cap.read()
    frameCTR += 1
    if not ret:
        break
    images.append(frame)
    print "Frame dims are {} * {} and frame number is {}/{}".format(imageInfo(frame)[0],imageInfo(frame)[1],frameCTR,numberOfFrames)
    if frameCTR%10 == 2:
        # saliency = utils.saliency(frame)
        grad = utils.gradient(frame)
        # utils.showImage(grad,"saliency")
        # remove(frame,grad)
        compute_tcoherence(images)
        break



print(frameCTR)
Example #30
def train_fmin(func, cloud, max_iterations, var_epsilon, learning_rate, method,
               N, kernel_a, alpha_init, alpha_rate, beta, gamma, verbose):

    from optimizers import update_cloud, update_cloud_derivative_free, update_gd_func
    from utils import gradient

    alpha = alpha_init

    # initiation of lists storing the history
    cost_history = []
    cost_history_mean = []
    elapsed_iterations = 0

    # performing calculations for subsequent iterations
    for i in range(max_iterations):

        function_values = func(cloud.T)

        if method in ["gradient_descent", "swarm"]:
            gradients = gradient(func, cloud.T).T

        if method == "swarm":
            cloud, cloud_var = update_cloud(cloud, gradients, N, learning_rate,
                                            kernel_a, alpha, beta, gamma)
        elif method == "swarm_derivfree":
            cloud, cloud_var = update_cloud_derivative_free(
                cloud, function_values, N, learning_rate, kernel_a, alpha,
                beta, gamma)
        elif method == "gradient_descent":
            cloud, cloud_var = update_gd_func(cloud, gradients, learning_rate)
        else:
            raise Exception("No method found")

        #end of iteration
        cost_history.append(function_values)

        #mean position
        cloud_mean = np.mean(cloud, axis=0)
        cloud_mean_func = func(cloud_mean)
        cost_history_mean.append(cloud_mean_func)

        #end of epoch----------------
        cloud_var = np.mean(
            cloud_var)  #mean of variances along dimensions of parameter space

        if (verbose):
            print(
                "Iteration: {:05} - Cloud mean cost: {:.5f} - Cloud variance: {:.5f}"
                .format(i, cloud_mean_func, cloud_var))

        alpha = alpha + alpha_rate
        elapsed_iterations += 1

        if cloud_var < var_epsilon:
            print("Convergence achieved - Particles are localized")
            break

    print("\nFunction value at cloud mean: " + str(cloud_mean_func))
    print("Function evaluated {:01} times".format(int(elapsed_iterations * N)))

    return cloud, cloud_mean, cloud_var, cost_history, cost_history_mean
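Example #30's `gradient(func, X)` evaluates gradients of the objective at every particle at once: `X` has shape (dim, n_points) and `func(X)` returns one value per point. A central-difference sketch under those assumptions:

import numpy as np

def gradient(func, X, eps=1e-6):
    # Central differences along each coordinate, vectorized over the
    # n_points columns of X; returns an array shaped like X.
    G = np.zeros_like(X, dtype=float)
    for i in range(X.shape[0]):
        E = np.zeros_like(X, dtype=float)
        E[i, :] = eps
        G[i, :] = (func(X + E) - func(X - E)) / (2 * eps)
    return G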
Example #31
    dx = x[1] - x[0]
    dy = y[1] - y[0]

    constants = easydict.EasyDict({
        "N": N,
        "dx": dx,
        "dy": dy,
    })

    n = np.cos(np.pi * X) * np.cos(np.pi * Y)
    dnx = -np.pi * np.sin(np.pi * X) * np.cos(np.pi * Y)
    dny = -np.pi * np.cos(np.pi * X) * np.sin(np.pi * Y)
    d2n = -2. * np.pi**2. * np.cos(np.pi * X) * np.cos(np.pi * Y)
    ng = get_ghosts(n, constants)

    grad_nx, grad_ny = gradient(ng, constants)
    lap_n = laplacian(ng, constants)
    err_1x[k] = np.amax(abs(grad_nx - dnx) / np.amax(abs(dnx)))
    err_1y[k] = np.amax(abs(grad_ny - dny) / np.amax(abs(dny)))
    err_2[k] = np.amax(abs(lap_n - d2n) / np.amax(abs(d2n)))

    plotname = figure_path + '/error_lap_%i.png' % N
    fig = plt.figure(figsize=(16, 4.5))
    plt.subplot(1, 4, 1)
    cs = plt.contourf(
        X, Y, np.abs(d2n - lap_n), 200, cmap='gist_yarg'
    )  #,color='goldenrod',linewidth=3) #,linestyle='None',marker='.')
    plt.colorbar(cs)
    plt.title(r"$|$error$|$", fontsize=16)
    plt.ylabel(r"$y$", fontsize=16)
    plt.xlabel(r"$x$", fontsize=16)
Example #32
    def build_model(self):
        with tf.name_scope('IR_input'):
            self.images_ir = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_ir')
            self.labels_ir = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_ir')
        with tf.name_scope('VI_input'):

            self.images_vi = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_vi')
            self.labels_vi = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_vi')
        with tf.name_scope('Mask_input'):
            self.images_mask = tf.placeholder(
                tf.float32,
                [None, self.image_size, self.image_size, self.c_dim],
                name='images_mask')
            self.labels_mask = tf.placeholder(
                tf.float32,
                [None, self.label_size, self.label_size, self.c_dim],
                name='labels_mask')

        # with tf.name_scope('IR_inputtu'):
        #     self.images_irtu = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.c_dim], name='images_irtu')
        #     self.labels_irtu = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.c_dim], name='labels_irtu')
        #
        # with tf.name_scope('VI_inputtu'):
        #     self.images_vitu = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.c_dim], name='images_vitu')
        #     self.labels_vitu = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.c_dim], name='labels_vitu')

        with tf.name_scope('input'):
            self.input_image_ir = self.labels_ir
            self.input_image_vi = self.labels_vi
            self.input_image_mask = self.labels_mask
            # self.input_image_irtu = self.labels_irtu
            # self.input_image_vitu = self.labels_vitu

        with tf.name_scope('fusion'):
            # self.fusion_image=self.fusion_model(self.input_image_ir,self.input_image_vi)
            self.fusion_map, self.fusion_mask = self.fusion_model(
                self.input_image_ir, self.input_image_vi)
            self.fusion_image = self.fusion_mask * self.labels_vi + (
                1 - self.fusion_mask) * self.labels_ir

        with tf.name_scope('grad_bin'):
            self.Image_vi_grad = gradient(self.labels_vi)
            self.Image_ir_grad = gradient(self.labels_ir)
            self.Image_fused_grad = gradient(self.fusion_image)
            self.Image_max_grad = tf.round(
                (self.Image_vi_grad + self.Image_ir_grad) //
                (tf.abs(self.Image_vi_grad + self.Image_ir_grad) +
                 0.0000000001)) * tf.maximum(tf.abs(self.Image_vi_grad),
                                             tf.abs(self.Image_ir_grad))
            # self.Image_vi_score=tf.reduce_mean(tf.square(self.Image_vi_grad))
            # self.Image_ir_score=tf.reduce_mean(tf.square(self.Image_ir_grad))

            # self.Image_vi_ir_grad_bin = tf.concat([self.Image_vi_grad, self.Image_ir_grad], 3)
            # self.Image_fused_grad_bin = tf.concat([self.Image_fused_grad, self.Image_fused_grad], 3)

        with tf.name_scope('image'):
            tf.summary.image('input_ir',
                             tf.expand_dims(self.labels_ir[1, :, :, :], 0))
            tf.summary.image('input_vi',
                             tf.expand_dims(self.labels_vi[1, :, :, :], 0))
            # tf.summary.image('input_irtu',tf.expand_dims(self.labels_irtu[1,:,:,:],0))
            # tf.summary.image('input_vitu',tf.expand_dims(self.labels_vitu[1,:,:,:],0))
            tf.summary.image('mask',
                             tf.expand_dims(self.labels_mask[1, :, :, :], 0))
            tf.summary.image('fusion_map',
                             tf.expand_dims(self.fusion_map[1, :, :, :], 0))
            tf.summary.image('fusion_mask',
                             tf.expand_dims(self.fusion_mask[1, :, :, :], 0))
            tf.summary.image('fusion_image',
                             tf.expand_dims(self.fusion_image[1, :, :, :], 0))
            # tf.summary.image('Image_vi_grad',tf.expand_dims(self.Image_vi_grad[1,:,:,:],0))
            # tf.summary.image('Image_ir_grad',tf.expand_dims(self.Image_ir_grad[1,:,:,:],0))
            # tf.summary.image('Image_max_grad', tf.expand_dims(self.Image_max_grad[1, :, :, :], 0))
            # tf.summary.image('Image_vi_ir_grad_bin',tf.expand_dims(self.Image_vi_ir_grad_bin[1,:,:,:],0))
            # tf.summary.image('Image_fused_grad_bin',tf.expand_dims(self.Image_fused_grad_bin[1,:,:,:],0))

        # with tf.name_scope('d_loss'):
        #     pos=self.discriminator(self.labels_mask,reuse=False)
        #     neg=self.discriminator(self.fusion_image,reuse=True,update_collection='NO_OPS')
        #     pos_loss=tf.reduce_mean(tf.square(pos-tf.random_uniform(shape=[self.batch_size,1],minval=0.7,maxval=1.2,dtype=tf.float32)))
        #     neg_loss=tf.reduce_mean(tf.square(neg-tf.random_uniform(shape=[self.batch_size,1],minval=0,maxval=0.3,dtype=tf.float32)))
        #     self.d_loss=neg_loss+pos_loss
        #     tf.summary.scalar('loss_d',self.d_loss)

        with tf.name_scope('g_loss'):
            # self.g_loss_1=tf.reduce_mean(tf.square(neg-tf.random_uniform(shape=[self.batch_size,1],minval=0.7,maxval=1.2,dtype=tf.float32)))
            # tf.summary.scalar('g_loss_1',self.g_loss_1)

            # self.g_loss_int =  tf.reduce_mean((self.Image_vi_weight-tf.minimum(self.Image_vi_weight,self.Image_ir_weight))*tf.square(self.fusion_image - self.labels_vi)) +\
            #                    tf.reduce_mean((self.Image_ir_weight-tf.minimum(self.Image_vi_weight,self.Image_ir_weight))*tf.square(self.fusion_image - self.labels_ir))

            self.g_loss_grau = tf.reduce_mean(
                tf.square(self.Image_fused_grad - self.Image_max_grad))

            # self.g_loss_int = tf.reduce_mean(tf.square(self.fusion_map - self.labels_mask))
            self.g_loss_int = Smooth_l1_loss(self.labels_mask, self.fusion_map)

            self.g_loss = self.g_loss_grau + 10 * self.g_loss_int

            # 5e-3

            # tf.summary.scalar('self.g_loss_int', self.g_loss_int)
            tf.summary.scalar('self.g_loss_grau', self.g_loss_grau)
            tf.summary.scalar('self.g_loss_int', self.g_loss_int)
            # tf.summary.scalar('g_loss_2',self.g_loss_2)
            # self.g_loss_total=self.g_loss_1+ self.g_loss_2
            tf.summary.scalar('loss_g', self.g_loss)

        self.saver = tf.train.Saver(max_to_keep=50)
Example #33
                image[i][j][2]=0
            
    return image


area=np.loadtxt(area_path)
images=[]

images_path = os.path.join(images_path, '*.jpg')

for image_file in glob.glob(images_path):
    img = cv2.imread(image_file,0)
    #img = cv2.resize(img,(224,224),interpolation=cv2.INTER_CUBIC)
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #print(img)
    img=utils.gradient(img)
    images.append(img)

image=cv2.imread(image_path)

n=len(area)

#print(images)

count=0

for i in range(len(area)):
    for j in range(len(area[i])):
        if area[i][j]==1:
            image=draw_map(i*normal_size,j*normal_size,image,images[count])     
            count+=1
Example #34
 def build_model(self):
   with tf.compat.v1.name_scope('IR_input'):
       # infrared image patch
       self.images_ir = tf.compat.v1.placeholder(tf.float32, [None, self.image_size, self.image_size, self.c_dim], name='images_ir')
       self.labels_ir = tf.compat.v1.placeholder(tf.float32, [None, self.label_size, self.label_size, self.c_dim], name='labels_ir')
   with tf.compat.v1.name_scope('VI_input'):
       # visible image patch
       self.images_vi = tf.compat.v1.placeholder(tf.float32, [None, self.image_size, self.image_size, self.c_dim], name='images_vi')
       self.labels_vi = tf.compat.v1.placeholder(tf.float32, [None, self.label_size, self.label_size, self.c_dim], name='labels_vi')
       #self.labels_vi_gradient=gradient(self.labels_vi)
   # Concatenate the infrared and visible images along the channel axis; the first channel is infrared, the second is visible
   with tf.compat.v1.name_scope('input'):
       #self.resize_ir=tf.image.resize_images(self.images_ir, (self.image_size, self.image_size), method=2)
       self.input_image = tf.concat([self.images_ir,self.images_vi], axis=-1)
   #self.pred=tf.clip_by_value(tf.sign(self.pred_ir-self.pred_vi),0,1)
   # fused image
   with tf.compat.v1.name_scope('fusion'):
       self.fusion_image = self.fusion_model(self.input_image)
   with tf.compat.v1.name_scope('d_loss'):
       # discriminator predictions for the visible and fused images
       #pos=self.discriminator(self.labels_vi,reuse=False)
       pos = self.discriminator(self.labels_vi, reuse=False)
       neg = self.discriminator(self.fusion_image, reuse=True, update_collection='NO_OPS')
       # push real samples toward 1; deviations are penalized (discriminator loss)
       #pos_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pos, labels=tf.ones_like(pos)))
       #pos_loss=tf.reduce_mean(tf.square(pos-tf.ones_like(pos)))
       pos_loss = tf.reduce_mean(tf.square(pos - tf.random.uniform(shape=[self.batch_size, 1], minval=0.7, maxval=1.2)))
       # push generated samples toward 0; deviations are penalized (discriminator loss)
       #neg_loss=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.zeros_like(neg)))
       #neg_loss=tf.reduce_mean(tf.square(neg-tf.zeros_like(neg)))
       neg_loss = tf.reduce_mean(tf.square(neg - tf.random.uniform(shape=[self.batch_size, 1], minval=0, maxval=0.3, dtype=tf.float32)))
       #self.d_loss=pos_loss+neg_loss
       self.d_loss = neg_loss + pos_loss
       tf.compat.v1.summary.scalar('loss_d', self.d_loss)
   with tf.compat.v1.name_scope('g_loss'):
       #self.g_loss_1=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=neg, labels=tf.ones_like(neg)))
       #self.g_loss_1=tf.reduce_mean(tf.square(neg-tf.ones_like(pos)))
       self.g_loss_1 = tf.reduce_mean(tf.square(neg - tf.random.uniform(shape=[self.batch_size, 1], minval=0.7, maxval=1.2, dtype=tf.float32)))
       tf.compat.v1.summary.scalar('g_loss_1', self.g_loss_1)
       #self.g_loss_2=tf.reduce_mean(tf.square(self.fusion_image - self.labels_ir))
       self.g_loss_2 = tf.reduce_mean(tf.square(self.fusion_image - self.labels_ir)) + 5 * tf.reduce_mean(tf.square(gradient(self.fusion_image) - gradient(self.labels_vi)))
       tf.compat.v1.summary.scalar('g_loss_2', self.g_loss_2)
       self.g_loss_total = self.g_loss_1 + 100 * self.g_loss_2
       tf.compat.v1.summary.scalar('loss_g', self.g_loss_total)
   self.saver = tf.compat.v1.train.Saver(max_to_keep=50)