Example #1
import chainer
import chainer.functions as F
import numpy as np


def lossless_triplet_loss(anchor,
                          positive,
                          negative,
                          N,
                          beta=None,
                          epsilon=1e-8):
    """
    N  --  The number of dimension
    beta -- The scaling factor, N is recommended
    epsilon -- The Epsilon value to prevent ln(0)
    """

    if beta is None:
        beta = N

    # Plain squared Euclidean distances, kept for reference:
    # pos_dist = F.sum((anchor - positive) ** 2, axis=1)
    # neg_dist = F.sum((anchor - negative) ** 2, axis=1)

    # Log-transformed distances: -ln(-x / beta + 1 + epsilon) keeps the
    # gradient from vanishing once the triplet condition is met.
    pos_dist = -F.log(-(F.sum((anchor - positive) ** 2, axis=1) / beta)
                      + 1 + epsilon)
    neg_dist = -F.log(-((N - F.sum((anchor - negative) ** 2, axis=1)) / beta)
                      + 1 + epsilon)

    loss = pos_dist + neg_dist

    # Debugging aid: warn when the loss turns NaN, which happens when the
    # argument of the log goes negative (e.g. a distance exceeding beta).
    nloss = chainer.as_array(loss)
    if np.isnan(nloss).any():
        print('lossless_triplet_loss: NaN detected in loss')

    return loss
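A minimal usage sketch for this loss, assuming sigmoid-bounded embeddings as the formulation expects; the toy batch and its dimensions below are made up:

import numpy as np
import chainer
import chainer.functions as F

# Hypothetical toy batch: 4 anchor/positive/negative embeddings of size 8,
# squashed through a sigmoid so every squared distance stays within [0, N].
rng = np.random.RandomState(0)
anchor = F.sigmoid(rng.randn(4, 8).astype(np.float32))
positive = F.sigmoid(rng.randn(4, 8).astype(np.float32))
negative = F.sigmoid(rng.randn(4, 8).astype(np.float32))

loss = lossless_triplet_loss(anchor, positive, negative, N=8)
print(chainer.as_array(loss))  # per-sample losses as a plain numpy array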
Example #2
    def calculate_Qvalue(self):
        """
        Function that calculates MaxQ for actions.
        Choose the best action type by comparing the maximum q values for both action types "move" and "peck"
        :return: action-value map.
        """

        action_value = {}

        for action in self.env.actions:
            action_value[action] = {}
            action_value[action]["SAT"] = {}
            action_value[action]["best_q"] = -np.inf

            for sat in self.env.sat_true_list:
                self.env.action_type = self.env.actions.index(action)
                self.env.sat_true = self.env.sat_true_list.index(sat)

                # set belief with current evidence.
                self.env.set_belief()

                state = self.env.preprocess_belief()

                # Evaluate the Q-network on the belief state and pull the
                # Q-values back to the host as a flat numpy array.
                batch = state.reshape(
                    (1, self.env.observation_space.shape[0]))
                if self.gpu:
                    out = self.q_func(cuda.to_gpu(batch, device=0))
                    q_values = cuda.to_cpu(
                        chainer.as_array(out.q_values).reshape((-1, )))
                else:
                    out = self.q_func(batch)
                    q_values = chainer.as_array(out.q_values).reshape((-1, ))

                action_value[action]["SAT"][self.env.sat_true] = {
                    "q_values": q_values,
                    "max_q": np.max(q_values)
                }

                if np.max(q_values) > action_value[action]["best_q"]:
                    action_value[action]["best_q"] = np.max(q_values)

        return action_value
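For context, a minimal sketch of how the returned map might be consumed to pick the better action type; the map below is a hypothetical stand-in with the same structure as the one built above:

import numpy as np

# Hypothetical action-value map following the structure built above.
action_value = {
    'move': {'SAT': {0: {'q_values': np.array([0.1, 0.4]), 'max_q': 0.4}},
             'best_q': 0.4},
    'peck': {'SAT': {0: {'q_values': np.array([0.7, 0.2]), 'max_q': 0.7}},
             'best_q': 0.7},
}

# The better action type is the one with the larger best_q.
best = max(action_value, key=lambda a: action_value[a]['best_q'])
print(best)  # -> 'peck'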
Example #3
    def choose_best_action(self):
        """
        Function to choose best action given action-value map.
        """

        state = self.env.preprocess_belief()
        # Evaluate the Q-network and pull the Q-values back as a flat array.
        batch = state.reshape((1, self.env.observation_space.shape[0]))
        if self.gpu:
            out = self.q_func(cuda.to_gpu(batch, device=0))
            q_values = cuda.to_cpu(
                chainer.as_array(out.q_values).reshape((-1, )))
        else:
            out = self.q_func(batch)
            q_values = chainer.as_array(out.q_values).reshape((-1, ))

        # Indices of all actions tied for the maximum Q-value; ties are
        # broken uniformly at random below.
        best_action = np.where(q_values == np.amax(q_values))[0]

        return np.random.choice(best_action), np.amax(q_values)
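The tie-breaking idiom used above can be exercised on its own; a minimal sketch with a made-up Q-value vector:

import numpy as np

q_values = np.array([0.2, 0.9, 0.9, 0.1])  # hypothetical Q-values

# Indices of all actions that attain the maximum Q-value.
best_action = np.where(q_values == np.amax(q_values))[0]  # array([1, 2])

# Pick one of the tied actions uniformly at random.
print(np.random.choice(best_action), np.amax(q_values))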
Example #4
    def worker():
        loop = tqdm.tqdm(range(1000))
        for i in loop:
            loop.set_description('Optimizing')
            optimizer.target.cleargrads()
            loss = model()
            loss.backward()
            optimizer.update()

            str_list = []
            # To speed up per-element access, move the data from GPU to CPU
            # before unwrapping it with chainer.as_array.
            model.vertices.to_cpu()
            varray = chainer.as_array(model.vertices)
            for v in varray[0]:
                str_list.append(
                    "{{\"x\":{:.6f},\"y\":{:.6f},\"z\":{:.6f}}}".format(
                        v[0], v[1], v[2]))
            varrayStr = ",".join(str_list)
            model.vertices.to_gpu()
            yield "{{\"vertices\":[{0}]}}\n".format(varrayStr)
Example #5
import chainer
import importlib
import torch

from generator import ResNetDeepLab as CRes
from options import get_options
from Pytorch.generator import ResNetDeepLab as PRes

gen_npz = 'pretrained/gen.npz'
opt = get_options()

c_gen = CRes(opt)
p_gen = PRes(opt)

chainer.serializers.load_npz(gen_npz, c_gen)
# Convert every non-ResNet Chainer parameter to a torch tensor, keyed by
# its Chainer parameter path.
d = dict([(i, torch.from_numpy(chainer.as_array(j)))
          for i, j in c_gen.namedparams() if 'resnet' not in i])

ordered_layer_list = [
    '/c1/c/b',  #1
    '/c1/c/W',  #1
    '/norm1/gamma',  #2
    '/norm1/beta',  #2
    '/c2/c/b',  #3
    '/c2/c/W',  #3
    '/norm2/gamma',  #4
    '/norm2/beta',  #4
    '/aspp/x1/c/b',  #5 1
    '/aspp/x1/c/W',  #5 1
    '/aspp/x1_bn/gamma',  #5 2
    '/aspp/x1_bn/beta',  #5 2
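The core Chainer-to-PyTorch conversion can be demonstrated on a single standalone link; the convolution below is a made-up example, not part of the generator:

import chainer
import chainer.links as L
import torch

# Hypothetical link; its weights live in .namedparams() like c_gen's.
conv = L.Convolution2D(3, 16, ksize=3)

for name, param in conv.namedparams():
    # chainer.as_array yields the numpy array behind the Parameter, which
    # torch.from_numpy wraps without copying.
    tensor = torch.from_numpy(chainer.as_array(param))
    print(name, tuple(tensor.shape))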
Example #6
import chainer
import numpy as np
from PIL import Image

wdistance = []

for i in range(0, nvideos - 1, 2):

    real_vid1 = Image.open(all_paths[i])
    real_vid2 = Image.open(all_paths[i + 1])

    real_vid1 = preprocess_vid(real_vid1)
    real_vid2 = preprocess_vid(real_vid2)

    real_vid = np.array([real_vid1, real_vid2])

    latent_z = generator.sample_hidden(2)
    fake_vid = generator(latent_z)

    # Score both videos with the discriminator; train mode and backprop are
    # disabled since this is evaluation only, and chainer.as_array strips
    # the Variable wrapper so only the raw data is kept.
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        eval_real = chainer.as_array(discriminator(real_vid))
        eval_fake = chainer.as_array(discriminator(fake_vid))

    dist_real_fake = np.abs(eval_real - eval_fake)
    wdistance.append(dist_real_fake)

    print(dist_real_fake)

wdistance = np.array(wdistance).flatten()
print(f'Average Wasserstein distance for {MODE} and {nvideos} test samples'
      f' = {np.mean(wdistance)}')
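The inference pattern from the loop, reduced to a self-contained sketch; the linear link below is a hypothetical stand-in for the discriminator:

import numpy as np
import chainer
import chainer.links as L

model = L.Linear(4, 1)  # stand-in for the discriminator
x = np.random.randn(2, 4).astype(np.float32)

# Disable train-time behavior and graph construction for pure inference,
# then strip the Variable wrapper with chainer.as_array.
with chainer.using_config('train', False), \
        chainer.using_config('enable_backprop', False):
    scores = chainer.as_array(model(x))

print(np.abs(scores[0] - scores[1]))  # distance between the two scores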