Code Example #1
    def CrossEntropyLoss(self, y_out, y_true):

        if not isinstance(y_true, Parameter):
            y_true = Parameter(data=y_true, eval_grad=False, graph=self.graph)

        batch_size = Parameter((1, 1),
                               init_zeros=True,
                               eval_grad=False,
                               graph=self.graph)  # mini-batch size
        batch_size.data.fill(float(y_true.shape[-1]))

        neg_one = Parameter((1, 1),
                            init_zeros=True,
                            eval_grad=False,
                            graph=self.graph)
        neg_one.data.fill(-1.0)  # constant -1, so l.grad is seeded the same way (dl/dl = 1) in every loss definition

        # cross-entropy via KL(P || Q) = Σ P·log(P) − Σ P·log(Q); the first term is 0 for one-hot P
        l = self.graph.multiply(
            self.graph.sum(self.graph.multiply(y_true, self.graph.log(y_out))),
            neg_one)
        # avg_loss = (1/m) * Σ_{i=1..m} loss[i]
        l = self.graph.divide(l, batch_size)

        l.grad[0, 0] = 1.0  # dl/dl = 1.0

        return l
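For reference, the same computation in plain NumPy (a sketch, assuming the layout used throughout these examples: columns are batch samples, y_true is one-hot, and y_out holds strictly positive probabilities):

import numpy as np

def cross_entropy(y_out, y_true):
    # mean cross-entropy over the mini-batch: -(1/m) * Σ y_true·log(y_out)
    return -np.sum(y_true * np.log(y_out)) / y_true.shape[-1]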
Code Example #2
File: module.py Project: srirambandi/NTM
    def load(self,
             file=None):  # model.load() - loads the state of the net from a file
        print('loading model...')

        if file is None:
            file = self.__class__.__name__ + '.npy'

        load_dict = np.load(file, allow_pickle=True).item()
        module_layers_stored = load_dict['module_layers']
        module_params_stored = load_dict['module_params']

        module_layers_actual = self.get_module_layers()
        module_params_actual = self.get_module_params()

        for layer_name, layer_stored in module_layers_stored.items():
            if layer_name in module_layers_actual:
                for param_name, param in layer_stored.items():
                    layer_actual = module_layers_actual[layer_name]
                    setattr(layer_actual, str(param_name),
                            Parameter(data=param))

        for param_name, param in module_params_stored.items():
            if param_name in module_params_actual:
                setattr(self, str(param_name), Parameter(data=param))

        print('Successfully loaded model from {}'.format(file))
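For context, the structure load() expects can be read off the code: a pickled dict whose 'module_layers' entry maps layer names to {parameter name: ndarray} dicts, and whose 'module_params' entry maps parameter names to ndarrays. A hypothetical round-trip (the layer and parameter names here are made up):

import numpy as np

state = {
    'module_layers': {'fc1': {'W': np.zeros((4, 3)), 'b': np.zeros((4, 1))}},
    'module_params': {'h0': np.zeros((4, 1))},
}
np.save('Model.npy', state)  # what a matching save() would presumably write
restored = np.load('Model.npy', allow_pickle=True).item()
assert restored['module_layers']['fc1']['W'].shape == (4, 3)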
Code Example #3
File: batch_norm.py Project: srirambandi/GAN
    def forward(self, x):

        if not isinstance(x, Parameter):
            x = Parameter(data=x, eval_grad=False, graph=self.graph)

        if self.graph.grad_mode:    # training
            # useful: https://arxiv.org/abs/1502.03167

            batch_size = Parameter((1, 1), init_zeros=True, eval_grad=False, graph=self.graph) # mini-batch size/channel size
            batch_size.data.fill(float(x.shape[self.axis]))

            # calculate mean and variance
            mean = self.graph.divide(self.graph.sum(x, axis=self.axis), batch_size)
            centered = self.graph.subtract(x, mean, axis=self.axis)
            var = self.graph.divide(self.graph.sum(self.graph.power(centered, 2), axis=self.axis), batch_size)

            self.m = self.momentum * self.m + (1 - self.momentum) * mean.data
            self.v = self.momentum * self.v + (1 - self.momentum) * var.data

            # normalize the data to zero mean and unit variance
            normalized = self.graph.multiply(centered, self.graph.power(var, -0.5), axis=self.axis)

        else:   # testing

            centered = np.subtract(x.data, self.m)
            normalized = np.multiply(centered, np.power(self.v + 1e-6, -0.5))
            normalized = Parameter(data=normalized, eval_grad=False, graph=self.graph)

        # scale and shift
        out = self.graph.multiply(normalized, self.gamma, axis=(-1,))    # scale

        if self.bias:   # shift
            out = self.graph.add(out, self.beta, axis=(-1,))

        return out
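The training branch in plain NumPy, for comparison (a sketch; note it adds the 1e-6 epsilon inside the square root for numerical safety, which the snippet above only applies in the test path):

import numpy as np

def batch_norm_train(x, gamma, beta, axis=-1, eps=1e-6):
    # normalize over the batch axis, then scale and shift
    mean = x.mean(axis=axis, keepdims=True)
    var = x.var(axis=axis, keepdims=True)
    x_hat = (x - mean) / np.sqrt(var + eps)
    return gamma * x_hat + beta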
Code Example #4
    def init_params(self):
        self.W_ih = Parameter((self.hidden_size, self.input_size),
                              graph=self.graph)  # input to hidden weights
        self.W_hh = Parameter((self.hidden_size, self.hidden_size),
                              graph=self.graph)  # hidden to hidden weights
        self.b_ih = Parameter((self.hidden_size, 1),
                              graph=self.graph)  # redundant with b_hh (the two biases are summed)
        self.b_hh = Parameter((self.hidden_size, 1), graph=self.graph)
Code Example #5
    def init_params(self):
        self.W_ih = Parameter(
            (4 * self.hidden_size, self.input_size),
            graph=self.graph)  # input to hidden weight volume
        self.W_hh = Parameter(
            (4 * self.hidden_size, self.hidden_size),
            graph=self.graph)  # hidden to hidden weight volume
        self.b_ih = Parameter((4 * self.hidden_size, 1),
                              graph=self.graph)  # input to hidden bias vector
        self.b_hh = Parameter((4 * self.hidden_size, 1),
                              graph=self.graph)  # hidden to hidden bias vector
Code Example #6
File: linear.py Project: srirambandi/GAN
    def init_params(self):
        root_k = np.sqrt(1. / self.input_features)
        self.W = Parameter((self.output_features, self.input_features),
                           uniform=True,
                           low=-root_k,
                           high=root_k,
                           graph=self.graph)  # weight volume
        self.b = Parameter((self.output_features, 1),
                           uniform=True,
                           low=-root_k,
                           high=root_k,
                           graph=self.graph)  # bias vector
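The bound root_k = √(1/fan_in) matches PyTorch's default nn.Linear initialization, U(−√(1/fan_in), √(1/fan_in)). In plain NumPy (a sketch with hypothetical dimensions):

import numpy as np

rng = np.random.default_rng(0)
input_features, output_features = 3, 4
root_k = np.sqrt(1. / input_features)  # fan-in based bound
W = rng.uniform(-root_k, root_k, size=(output_features, input_features))
b = rng.uniform(-root_k, root_k, size=(output_features, 1))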
Code Example #7
    def forward(self, x, hidden):

        h, c = hidden

        if not isinstance(x, Parameter):
            x = Parameter(data=x, eval_grad=False, graph=self.graph)

        i_h = self.graph.dot(self.W_ih, x)
        if self.bias:
            i_h = self.graph.add(i_h, self.b_ih, axis=(-1, ))

        h_h = self.graph.dot(self.W_hh, h)
        if self.bias:
            h_h = self.graph.add(h_h, self.b_hh, axis=(-1, ))

        gates = self.graph.add(i_h, h_h)

        # forget, input, gate (a.k.a. cell gate, distinct from the cell state), and output gates of the LSTM cell
        # useful: http://colah.github.io/posts/2015-08-Understanding-LSTMs/
        f, i, g, o = self.graph.split(gates, sections=4, axis=0)

        f = self.graph.sigmoid(f)
        i = self.graph.sigmoid(i)
        g = self.graph.tanh(g)
        o = self.graph.sigmoid(o)

        c = self.graph.add(self.graph.multiply(f, c),
                           self.graph.multiply(i, g))
        h = self.graph.multiply(o, self.graph.tanh(c))

        return (h, c)
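The same cell update in plain NumPy (a sketch; columns are batch samples, and the gate rows are assumed ordered f, i, g, o as in the split above):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x, h, c, W_ih, W_hh, b_ih, b_hh):
    gates = W_ih @ x + b_ih + W_hh @ h + b_hh      # (4*hidden, batch)
    f, i, g, o = np.split(gates, 4, axis=0)
    c = sigmoid(f) * c + sigmoid(i) * np.tanh(g)   # new cell state
    h = sigmoid(o) * np.tanh(c)                    # new hidden state
    return h, c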
Code Example #8
    def JSDivLoss(self, y_out, y_true):

        if not isinstance(y_true, Parameter):
            y_true = Parameter(data=y_true, eval_grad=False, graph=self.graph)

        batch_size = Parameter((1, 1),
                               init_zeros=True,
                               eval_grad=False,
                               graph=self.graph)  # mini-batch size
        batch_size.data.fill(float(y_true.shape[-1]))

        two = Parameter((1, 1),
                        init_zeros=True,
                        eval_grad=False,
                        graph=self.graph)
        two.data.fill(2.0)  # constant 2, used for the mixture (P + Q)/2 and for averaging the two KL terms

        neg_one = Parameter((1, 1),
                            init_zeros=True,
                            eval_grad=False,
                            graph=self.graph)
        neg_one.data.fill(-1.0)  # constant -1, so l.grad is seeded the same way (dl/dl = 1) in every loss definition

        # mean distribution: M = (P + Q)/2
        y_mean = self.graph.divide(self.graph.add(y_out, y_true), two)
        # KL(P || M) = Σ P·log(P) − Σ P·log(M); the first term is 0 for one-hot P
        kl_1 = self.graph.multiply(
            self.graph.sum(self.graph.multiply(y_true,
                                               self.graph.log(y_mean))),
            neg_one)
        # KL(Q || M) = Σ Q·log(Q) − Σ Q·log(M)
        kl_2 = self.graph.add(
            self.graph.sum(self.graph.multiply(y_out, self.graph.log(y_out))),
            self.graph.multiply(
                self.graph.sum(self.graph.multiply(y_out,
                                                   self.graph.log(y_mean))),
                neg_one))
        # JS(P, Q) = (KL(P || M) + KL(Q || M)) / 2
        l = self.graph.divide(self.graph.add(kl_1, kl_2), two)
        # avg_loss = (1/m) * Σ_{i=1..m} loss[i]
        l = self.graph.divide(l, batch_size)

        l.grad[0, 0] = 1.0  # dl/dl = 1.0

        return l
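The same loss in plain NumPy (a sketch mirroring the snippet: the entropy term of one-hot y_true is dropped as zero, and all logged quantities are assumed strictly positive):

import numpy as np

def js_div_loss(y_out, y_true):
    m = (y_out + y_true) / 2.0
    kl_1 = -np.sum(y_true * np.log(m))                                # KL(P || M) with Σ P·log(P) = 0
    kl_2 = np.sum(y_out * np.log(y_out)) - np.sum(y_out * np.log(m))  # KL(Q || M)
    return (kl_1 + kl_2) / 2.0 / y_true.shape[-1]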
Code Example #9
    def BCELoss(self, y_out, y_true):

        if not isinstance(y_true, Parameter):
            y_true = Parameter(data=y_true, eval_grad=False, graph=self.graph)

        batch_size = Parameter((1, 1),
                               init_zeros=True,
                               eval_grad=False,
                               graph=self.graph)  # mini-batch size
        batch_size.data.fill(float(y_true.shape[-1]))

        neg_one = Parameter((1, 1),
                            init_zeros=True,
                            eval_grad=False,
                            graph=self.graph)
        neg_one.data.fill(-1.0)  # constant -1, so l.grad is seeded the same way (dl/dl = 1) in every loss definition

        one = Parameter((1, 1),
                        init_zeros=True,
                        eval_grad=False,
                        graph=self.graph)
        one.data.fill(1.0)

        # complement of the prediction: 1 - y_out
        c2 = self.graph.multiply(self.graph.subtract(y_out, one), neg_one)
        # complement of the target: 1 - y_true
        t2 = self.graph.multiply(self.graph.subtract(y_true, one), neg_one)

        # -Σ y_true·log(y_out)
        l1 = self.graph.multiply(
            self.graph.sum(self.graph.multiply(y_true, self.graph.log(y_out))),
            neg_one)
        # -Σ (1 - y_true)·log(1 - y_out)
        l2 = self.graph.multiply(
            self.graph.sum(self.graph.multiply(t2, self.graph.log(c2))),
            neg_one)
        # loss = -Σ y_true·log(y_out) - Σ (1 - y_true)·log(1 - y_out)
        l = self.graph.add(l1, l2)
        # avg_loss = (1/m) * Σ_{i=1..m} loss[i]
        l = self.graph.divide(l, batch_size)

        l.grad[0, 0] = 1.0  # dl/dl = 1.0

        return l
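In plain NumPy (a sketch; y_out entries are assumed to lie strictly inside (0, 1)):

import numpy as np

def bce_loss(y_out, y_true):
    l = -np.sum(y_true * np.log(y_out) + (1 - y_true) * np.log(1 - y_out))
    return l / y_true.shape[-1]  # mean over the mini-batch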
Code Example #10
    def forward(self, x):

        if not isinstance(x, Parameter):
            x = Parameter(data=x, eval_grad=False, graph=self.graph)

        out = self.graph.dropout(x, p=self.p)
        
        return out
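What graph.dropout presumably computes, written as inverted dropout in NumPy (a sketch; it assumes p is the drop probability and that survivors are rescaled by 1/(1-p) so the expected activation is unchanged, though the library may define p the other way around):

import numpy as np

def dropout(x, p=0.5, training=True, rng=np.random.default_rng()):
    if not training or p == 0.0:
        return x
    mask = (rng.random(x.shape) >= p) / (1.0 - p)  # zero with prob. p, rescale survivors
    return x * mask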
Code Example #11
File: pooling.py Project: srirambandi/NTM
    def forward(self, x):

        if not isinstance(x, Parameter):
            x = Parameter(data=x, eval_grad=False, graph=self.graph)

        out = self.graph.max_pool2d(x,
                                    k=self.kernel_size,
                                    s=self.stride,
                                    p=self.padding)

        return out
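The spatial output size follows the usual convolution/pooling arithmetic; a small helper for checking shapes:

def pool_out_size(n, k, s, p):
    # output size along one spatial dimension: kernel k, stride s, padding p
    return (n + 2 * p - k) // s + 1

pool_out_size(28, 2, 2, 0)  # -> 14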
Code Example #12
    def MSELoss(self, y_out, y_true):

        if not isinstance(y_true, Parameter):
            y_true = Parameter(data=y_true, eval_grad=False, graph=self.graph)

        batch_size = Parameter((1, 1),
                               init_zeros=True,
                               eval_grad=False,
                               graph=self.graph)  # mini-batch size
        batch_size.data.fill(float(y_true.shape[-1]))

        # L = Σ (y_out - y_true)^2
        l = self.graph.sum(
            self.graph.multiply(self.graph.subtract(y_out, y_true),
                                self.graph.subtract(y_out, y_true)))
        # avg_loss = (1/m) * Σ_{i=1..m} loss[i]
        l = self.graph.divide(l, batch_size)

        l.grad[0, 0] = 1.0  # dl/dl = 1.0

        return l
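In plain NumPy (a sketch; note the snippet averages the summed squared error over the batch only, not over output dimensions):

import numpy as np

def mse_loss(y_out, y_true):
    return np.sum((y_out - y_true) ** 2) / y_true.shape[-1]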
Code Example #13
    def forward(self, x):

        if not isinstance(x, Parameter):
            x = Parameter(data=x, eval_grad=False, graph=self.graph)

        # convolution operation
        out = self.graph.conv2d_faster(x, self.K, self.stride, self.padding)

        if self.bias:  # adding bias
            out = self.graph.add(out, self.b, axis=(-3, -2, -1))

        return out
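A direct, loop-based 2D convolution for a single sample, useful as a reference for what conv2d_faster computes (a sketch using the conventional (C_out, C_in, kH, kW) kernel layout, which may differ from this library's internal layout):

import numpy as np

def conv2d_naive(x, K, b=None, stride=1, pad=0):
    # x: (C_in, H, W), K: (C_out, C_in, kH, kW), b: (C_out,) or None
    C_out, C_in, kH, kW = K.shape
    x = np.pad(x, ((0, 0), (pad, pad), (pad, pad)))
    H_out = (x.shape[1] - kH) // stride + 1
    W_out = (x.shape[2] - kW) // stride + 1
    out = np.zeros((C_out, H_out, W_out))
    for c in range(C_out):
        for i in range(H_out):
            for j in range(W_out):
                patch = x[:, i * stride:i * stride + kH, j * stride:j * stride + kW]
                out[c, i, j] = np.sum(patch * K[c])
    if b is not None:
        out += b.reshape(C_out, 1, 1)
    return out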
Code Example #14
File: sequence_models.py Project: srirambandi/GAN
    def init_params(self):
        root_k = np.sqrt(1. / self.hidden_size)
        self.W_ih = Parameter((self.hidden_size, self.input_size),
                              uniform=True,
                              low=-root_k,
                              high=root_k,
                              graph=self.graph)
        self.W_hh = Parameter((self.hidden_size, self.hidden_size),
                              uniform=True,
                              low=-root_k,
                              high=root_k,
                              graph=self.graph)
        self.b_ih = Parameter((self.hidden_size, 1),
                              uniform=True,
                              low=-root_k,
                              high=root_k,
                              graph=self.graph)  # redundant with b_hh (the two biases are summed)
        self.b_hh = Parameter((self.hidden_size, 1),
                              uniform=True,
                              low=-root_k,
                              high=root_k,
                              graph=self.graph)
Code Example #15
File: sequence_models.py Project: srirambandi/GAN
    def init_params(self):
        root_k = np.sqrt(1. / self.hidden_size)
        self.W_ih = Parameter(
            (4 * self.hidden_size, self.input_size),
            uniform=True,
            low=-root_k,
            high=root_k,
            graph=self.graph)  # input to hidden weight volume
        self.W_hh = Parameter(
            (4 * self.hidden_size, self.hidden_size),
            uniform=True,
            low=-root_k,
            high=root_k,
            graph=self.graph)  # hidden to hidden weight volume
        self.b_ih = Parameter((4 * self.hidden_size, 1),
                              uniform=True,
                              low=-root_k,
                              high=root_k,
                              graph=self.graph)  # input to hidden bias vector
        self.b_hh = Parameter((4 * self.hidden_size, 1),
                              uniform=True,
                              low=-root_k,
                              high=root_k,
                              graph=self.graph)  # hidden to hidden bias vector
Code Example #16
    def TestLoss(self, y_out):

        batch_size = Parameter((1, 1),
                               init_zeros=True,
                               eval_grad=False,
                               graph=self.graph)  # mini-batch size
        batch_size.data.fill(float(y_out.shape[-1]))

        # a test scoring function: each sample's loss is the sum of the elements
        # of its output vector; helps identify leaks between samples in a batch
        l = self.graph.sum(y_out)
        l = self.graph.divide(l, batch_size)

        l.grad[0, 0] = 1.0

        return l
Code Example #17
File: linear.py Project: srirambandi/GAN
    def forward(self, x):
        # making the input compatible with graph operations
        if not isinstance(x, Parameter):
            x = Parameter(data=x, eval_grad=False, graph=self.graph)

        # flatten the input if it came from layers like Conv2d
        if len(x.shape) > 2:
            x = self.graph.reshape(x)

        # y = Wx + b
        out = self.graph.dot(self.W, x)  # matmul

        if self.bias:  # adding bias
            out = self.graph.add(out, self.b, axis=(-1, ))

        return out
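The underlying math in plain NumPy (a sketch; columns of x are batch samples, so the bias column broadcasts across the batch):

import numpy as np

def linear(x, W, b=None):
    y = W @ x          # (out_features, batch)
    if b is not None:
        y = y + b      # b: (out_features, 1)
    return y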
Code Example #18
    def forward(self, x, hidden):

        h = hidden

        if not isinstance(x, Parameter):
            x = Parameter(data=x, eval_grad=False, graph=self.graph)

        i_h = self.graph.dot(self.W_ih, x)
        if self.bias:
            i_h = self.graph.add(i_h, self.b_ih, axis=(-1, ))

        h_h = self.graph.dot(self.W_hh, h)
        if self.bias:
            h_h = self.graph.add(h_h, self.b_hh, axis=(-1, ))

        h = self.graph.add(i_h, h_h)

        h = self.graph.tanh(h)

        return h
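The same update in plain NumPy (a sketch; it also shows why b_ih is redundant, since the two bias vectors are simply summed):

import numpy as np

def rnn_step(x, h, W_ih, W_hh, b_ih, b_hh):
    return np.tanh(W_ih @ x + b_ih + W_hh @ h + b_hh)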
Code Example #19
File: convolutional.py Project: srirambandi/NTM
    def init_params(self):
        root_k = np.sqrt(1. / (self.output_channels
                               * self.kernel_size[0] * self.kernel_size[1]))
        self.K = Parameter((self.input_channels, *self.filter_size),
                           uniform=True, low=-root_k, high=root_k, graph=self.graph)
        self.b = Parameter((self.output_channels, 1, 1, 1),
                           uniform=True, low=-root_k, high=root_k, graph=self.graph)
Code Example #20
File: batch_norm.py Project: srirambandi/GAN
    def init_params(self):
        shape = (*self.hidden_shape, 1)
        self.gamma = Parameter(shape, init_ones=True, graph=self.graph)  # scale
        self.beta = Parameter(shape, init_zeros=True, graph=self.graph)  # shift
        # running statistics, with the normalization axis collapsed to size 1
        stat_shape = list(shape)
        stat_shape[self.axis] = 1
        self.m = np.zeros(stat_shape)  # moving mean
        self.v = np.ones(stat_shape)   # moving variance
Code Example #21
    def init_params(self):
        self.K = Parameter((self.input_channels, *self.filter_size),
                           graph=self.graph)
        self.b = Parameter((self.output_channels, 1, 1, 1),
                           init_zeros=True,
                           graph=self.graph)
Code Example #22
class Environment(gym.Env):
    params = [
        Parameter('param_' + str(x), min_value=-10, max_value=10)
        for x in range(MAX_RESULTS)
    ]

    def __init__(self, agent):
        super(Environment, self).__init__()
        self._agent = agent
        self._epoch_num = 0
        self._last_render = None
        self._state = {}

        self.last = {
            'reward': 0.0,
            'observation': None,
            'policy': None,
            'params': {},
            'state': None,
            'state_v': None
        }

        self.action_space = spaces.MultiDiscrete(
            [p.space_size() for p in Environment.params if p.trainable])
        self.observation_space = spaces.Box(low=0,
                                            high=1,
                                            shape=featurizer.shape,
                                            dtype=np.float32)
        self.reward_range = reward.range

    @staticmethod
    def policy_size():
        return len(list(p for p in Environment.params if p.trainable))

    @staticmethod
    def policy_to_params(policy):
        num = len(policy)
        params = {}

        assert len(Environment.params) == num

        for i in range(num):
            param = Environment.params[i]
            params[param.name] = param.to_param_value(policy[i])

        return params

    def _apply_policy(self, policy):
        new_params = Environment.policy_to_params(policy)
        self.last['policy'] = policy
        self.last['params'] = new_params
        self._agent.on_ai_policy(new_params)

    def set_state(self, state):
        self._state = state
        return self._state

    def get_state(self):
        return self._state

    def step(self, policy):
        self._apply_policy(policy)
        self._epoch_num += 1
        f = reward.RewardFunction()
        self.last['reward'] = f(self._state)
        self.last['state'] = self._state
        self.last['state_v'] = featurizer.featurize(self._state)
        self._agent.on_ai_step()
        return (self.last['state_v'], self.last['reward'],
                not self._agent.is_training(), {})

    def reset(self):
        self._epoch_num = 0

    def render(self, mode='human', close=False, force=False):
        return
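Note that Parameter here is an RL hyperparameter descriptor (name, min_value, max_value), not the autograd Parameter of the earlier examples. A minimal driving loop might look as follows (a sketch: the agent object and its callbacks are assumed, the random policy is a stand-in for a learned one, and since this reset() returns no observation, the state is seeded explicitly with a hypothetical initial_state dict):

env = Environment(agent)        # agent must provide on_ai_policy/on_ai_step/is_training
env.set_state(initial_state)    # hypothetical initial state dict
env.reset()
done = False
while not done:
    policy = env.action_space.sample()      # stand-in for a learned policy
    obs, rew, done, info = env.step(policy)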