Example #1
    def setUp(self):
        super(TestNestedFunctionSet, self).setUp()
        self.fs1 = chainer.FunctionSet(
            a=SimpleLink((1, 2)))
        self.fs2 = chainer.FunctionSet(
            fs1=self.fs1,
            b=SimpleLink((3, 4)))
Example #2
    def __init__(self):
        self.state = xp.array([[0.0, 0.0]], dtype=np.float32)
        self.next_state = xp.array([[0.0, 0.0]], dtype=np.float32)
        self.reward = 0.0
        self.action = 0.0

        self.critic_model = chainer.FunctionSet(
            l1=F.Linear(2, 100),
            l2=F.Linear(100, 1, initialW=np.zeros((1, 100), dtype=np.float32)),
        )
        self.actor_model = chainer.FunctionSet(
            l1=F.Linear(2, 100),
            l2=F.Linear(100, 1, initialW=np.zeros((1, 100), dtype=np.float32)),
        )
        if args.gpu >= 0:
            self.critic_model.to_gpu()
            self.actor_model.to_gpu()
        self.critic_optimizer = optimizers.SGD(self.ALPHA)
        self.critic_optimizer.setup(self.critic_model)
        self.actor_optimizer = optimizers.SGD(self.ALPHA)
        self.actor_optimizer.setup(self.actor_model)

        self.oldact = 0.0
        self.sigma = 10.0

        self.limit_action = 5.0
        self.min_action = -5.0
        self.max_action = 5.0
Example #3
    def __init__(self):
        self.init_theta = 0.0
        self.init_omega = 0.0

        self.episode_time = 10.0
        self.hz = 20.0

        self.evaluation_freq = 100

        self.critic_model = chainer.FunctionSet(
            l1=F.Linear(2, 100),
            l2=F.Linear(100, 100),
            l3=F.Linear(100, 1, initialW=np.zeros((1, 100), dtype=np.float32)),
        )
        self.target_critic_model = copy.deepcopy(self.critic_model)
        self.actor_model = chainer.FunctionSet(
            l1=F.Linear(2, 100),
            l2=F.Linear(100, 100),
            l3=F.Linear(100, 1, initialW=np.zeros((1, 100), dtype=np.float32)),
        )
        if args.gpu >= 0:
            self.critic_model.to_gpu()
            self.target_critic_model.to_gpu()
            self.actor_model.to_gpu()
        #  self.critic_optimizer = optimizers.SGD(self.ALPHA)
        self.critic_optimizer = optimizers.Adam(self.ALPHA)
        self.critic_optimizer.setup(self.critic_model)
        #  self.actor_optimizer = optimizers.SGD(self.ALPHA)
        self.actor_optimizer = optimizers.Adam(self.ALPHA)
        self.actor_optimizer.setup(self.actor_model)

        #history data : D = [state(theta, omega), action, old_action, reward, next_state(theta, omega), episode_end_flag]
        self.D = [
            xp.zeros((self.data_size, 1, 2), dtype=xp.float32),
            xp.zeros(self.data_size, dtype=xp.float32),
            xp.zeros(self.data_size, dtype=xp.float32),
            xp.zeros((self.data_size, 1), dtype=xp.float32),
            xp.zeros((self.data_size, 1, 2), dtype=xp.float32),
            xp.zeros((self.data_size, 1), dtype=np.bool)
        ]
        #  print "D : ", self.D

        self.D_actor = [
            xp.zeros((self.data_size, 1, 2), dtype=xp.float32),
            xp.zeros(self.data_size, dtype=xp.float32),
            xp.zeros(self.data_size, dtype=xp.float32),
            xp.zeros((self.data_size, 1), dtype=xp.float32),
            xp.zeros((self.data_size, 1, 2), dtype=xp.float32),
            xp.zeros((self.data_size, 1), dtype=np.bool)
        ]
        #  print "D_actor : ", self.D_actor

        self.sigma = 10.0

        self.limit_action = 5.0
        self.min_action = -5.0
        self.max_action = 5.0

        self.data_index_actor = 0
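The comment above describes the layout of the replay-history lists D and D_actor: parallel arrays indexed by transition slot. As a rough sketch (not from the original source), one transition could be written into such a buffer like this; xp is assumed to be NumPy, and data_size and store_transition are placeholder names for this illustration.

import numpy as np

xp = np            # the original switches xp between numpy and cupy depending on args.gpu
data_size = 1000   # placeholder buffer length

# D = [state(theta, omega), action, old_action, reward, next_state(theta, omega), episode_end_flag]
D = [
    xp.zeros((data_size, 1, 2), dtype=xp.float32),
    xp.zeros(data_size, dtype=xp.float32),
    xp.zeros(data_size, dtype=xp.float32),
    xp.zeros((data_size, 1), dtype=xp.float32),
    xp.zeros((data_size, 1, 2), dtype=xp.float32),
    xp.zeros((data_size, 1), dtype=bool),
]

def store_transition(D, index, state, action, old_action, reward, next_state, done):
    """Write one transition into the slot for `index`, wrapping when the buffer is full."""
    i = index % data_size
    D[0][i] = state        # (1, 2) current (theta, omega)
    D[1][i] = action
    D[2][i] = old_action
    D[3][i] = reward
    D[4][i] = next_state   # (1, 2) next (theta, omega)
    D[5][i] = done

store_transition(D, 0,
                 np.array([[0.1, -0.2]], dtype=np.float32), 1.5, 0.0, -1.0,
                 np.array([[0.0, 0.0]], dtype=np.float32), False)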
Example #4
def init_model():
    #Make models
    if use_pre2 == 'pre': pre_unit = 4
    else: pre_unit = 0
    if use_null == 'null': null_unit = 6
    else: null_unit = 0
    if args.phrase == 'phrase':
        phrase_unit = 4
        model = chainer.FunctionSet(
            trainable=chainer.FunctionSet(
                w0=F.Linear(n_units * 2 + null_unit * 2, n_label),
                ww0=F.Linear(
                    n_units * 2 + pre_unit + null_unit * 2 + phrase_unit,
                    n_units + null_unit),
                ww1=F.Linear(
                    n_units * 2 + pre_unit + null_unit * 2 + phrase_unit,
                    n_units + null_unit),
            ),
            w1_f=F.Linear(n_units * 2 + null_unit * 2,
                          n_units + null_unit),  #source input
            w2_f=F.Linear(n_units + null_unit,
                          n_units * 2 + null_unit * 2),  #source output
            w1_e=F.Linear(n_units * 2 + null_unit * 2,
                          n_units + null_unit),  #target input
            w2_e=F.Linear(n_units + null_unit,
                          n_units * 2 + null_unit * 2),  #target output
            embed_f=F.EmbedID(vocab_f['len_vocab'],
                              n_units),  #source word embedding
            embed_e=F.EmbedID(vocab_e['len_vocab'],
                              n_units),  #target word embedding
        )
    else:
        model = chainer.FunctionSet(
            trainable=chainer.FunctionSet(w0=F.Linear(
                n_units * 4 + null_unit * 4, n_label), ),
            w1_f=F.Linear(n_units * 2 + null_unit * 2,
                          n_units + null_unit),  #source input
            w2_f=F.Linear(n_units + null_unit,
                          n_units * 2 + null_unit * 2),  #source output
            w1_e=F.Linear(n_units * 2 + null_unit * 2,
                          n_units + null_unit),  #target input
            w2_e=F.Linear(n_units + null_unit,
                          n_units * 2 + null_unit * 2),  #target output
            embed_f=F.EmbedID(vocab_f['len_vocab'],
                              n_units),  #source word embedding
            embed_e=F.EmbedID(vocab_e['len_vocab'],
                              n_units),  #target word embedding 
        )
    if opt_name == 'SGD':
        optimizer = optimizers.SGD(lr=0.02)  # (lr=opt_score)  # lr=0.01
    elif opt_name == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=0.001)  # (lr=opt_score)  # lr=0.001
    elif opt_name == 'AdaDelta':
        optimizer = optimizers.AdaDelta(rho=0.9)  # (rho=opt_score)  # rho=0.9
    elif opt_name == 'Adam':
        optimizer = optimizers.Adam(
            alpha=0.0001)  # (alpha=opt_score)  # alpha=0.0001
    optimizer.setup(model)  # .collect_parameters()
    return model, optimizer
Example #5
    def __init__(self):
        #  self.init_theta = math.pi
        self.init_theta = 0.0
        self.init_omega = 0.0

        self.episode_time = 10.0 * 2.0
        self.hz = 10.0 * 2.0

        self.evaluation_freq = 100

        self.state = xp.array([[self.init_theta, self.init_omega]],
                              dtype=np.float32)
        self.next_state = xp.array([[self.init_theta, self.init_omega]],
                                   dtype=np.float32)
        self.reward = 0.0
        self.action = 0.0

        self.critic_model = chainer.FunctionSet(
            l1=F.Linear(2, 1000),
            l2=F.Linear(1000, 500),
            l3=F.Linear(500, 250),
            l4=F.Linear(250, 125),
            l5=F.Linear(125, 60),
            l6=F.Linear(60, 30),
            l7=F.Linear(30, 1, initialW=np.zeros((1, 30), dtype=np.float32)),
        )
        self.actor_model = chainer.FunctionSet(
            l1=F.Linear(2, 1000),
            l2=F.Linear(1000, 500),
            l3=F.Linear(500, 250),
            l4=F.Linear(250, 125),
            l5=F.Linear(125, 60),
            l6=F.Linear(60, 30),
            l7=F.Linear(30, 1, initialW=np.zeros((1, 30), dtype=np.float32)),
        )
        """
        self.critic_model = chainer.FunctionSet(
                l1 = F.Linear(2, 1, initialW=np.zeros((1, 2), dtype=np.float32)),
                )
        self.actor_model = chainer.FunctionSet(
                l1 = F.Linear(2, 1, initialW=np.zeros((1, 2), dtype=np.float32))
                )
        """

        if args.gpu >= 0:
            self.critic_model.to_gpu()
            self.actor_model.to_gpu()
        self.critic_optimizer = optimizers.SGD(self.ALPHA)
        self.critic_optimizer.setup(self.critic_model)
        self.actor_optimizer = optimizers.SGD(self.ALPHA_actor)
        self.actor_optimizer.setup(self.actor_model)

        self.oldact = 0.0

        self.limit_action = 5.0
        self.min_action = -5.0
        self.max_action = 5.0
Example #6
    def setUp(self):
        self.fs1 = chainer.FunctionSet(
            a=MockFunction((1, 2)))
        self.fs2 = chainer.FunctionSet(
            fs1=self.fs1,
            b=MockFunction((3, 4)))

        self.p_b = np.zeros((3, 4)).astype(np.float32)
        self.p_a = np.zeros((1, 2)).astype(np.float32)
        self.gp_b = np.ones((3, 4)).astype(np.float32)
        self.gp_a = np.ones((1, 2)).astype(np.float32)
Example #7
    def define_model(self):
        # Model1
        self.model = chainer.FunctionSet(l1=F.Linear(784, 1000),
                                         l2=F.Linear(1000, 1000),
                                         l3=F.Linear(1000, 100))

        # Model2
        self.model2 = chainer.FunctionSet(l1=F.Linear(100, 1000),
                                          l2=F.Linear(1000, 10))

        # Assign models to GPU
        self.model.to_gpu(GPU1)
        self.model2.to_gpu(GPU2)
Example #8
    def define_model(self):
        # Model1
        self.model = chainer.FunctionSet(conv1=F.Convolution2D(1, 20, 5),
                                         conv2=F.Convolution2D(20, 50, 5),
                                         l1=F.Linear(200, 1000),
                                         l2=F.Linear(1000, 1000))

        # Model2
        self.model2 = chainer.FunctionSet(l1=F.Linear(1000, 10000),
                                          l2=F.Linear(10000, 10))

        # Assign models to GPU
        self.model.to_gpu(GPU1)
        self.model2.to_gpu(GPU2)
Example #9
    def __init__(self, n_units, vocab_in, vocab_out, loadpath=None, gpu=-1):
        self.xp = np

        self.tagger = igo.tagger.Tagger(self.DIC_DIR)

        self.vocab_in = vocab_in
        self.vocab_out = vocab_out

        self.n_units = n_units

        if loadpath:
            with open(loadpath, 'rb') as f:
                self.model = pickle.load(f)

        else:
            self.model = chainer.FunctionSet(
                embed=F.EmbedID(len(self.vocab_in), n_units),
                l1_x=F.Linear(self.n_units, 4 * self.n_units),
                l1_h=F.Linear(self.n_units, 4 * self.n_units),
                l2_x=F.Linear(self.n_units, 4 * self.n_units),
                l2_h=F.Linear(self.n_units, 4 * self.n_units),
                l3=F.Linear(self.n_units, len(self.vocab_out)),
            )

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
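The model above wires two LSTM layers through the l1_x/l1_h and l2_x/l2_h linear pairs; the forward pass itself is outside this excerpt. A minimal one-step sketch of how such a model is typically driven with the Chainer 1.x F.lstm function, with placeholder sizes and a zero-initialized recurrent state:

import numpy as np
import chainer
import chainer.functions as F

n_units, in_vocab, out_vocab = 128, 1000, 1000  # placeholder sizes
model = chainer.FunctionSet(
    embed=F.EmbedID(in_vocab, n_units),
    l1_x=F.Linear(n_units, 4 * n_units),
    l1_h=F.Linear(n_units, 4 * n_units),
    l2_x=F.Linear(n_units, 4 * n_units),
    l2_h=F.Linear(n_units, 4 * n_units),
    l3=F.Linear(n_units, out_vocab),
)

def zero_state():
    return chainer.Variable(np.zeros((1, n_units), dtype=np.float32))

c1, h1, c2, h2 = zero_state(), zero_state(), zero_state(), zero_state()

word = chainer.Variable(np.array([3], dtype=np.int32))  # one input token ID
x = model.embed(word)                                   # (1, n_units)
c1, h1 = F.lstm(c1, model.l1_x(x) + model.l1_h(h1))     # first LSTM layer
c2, h2 = F.lstm(c2, model.l2_x(h1) + model.l2_h(h2))    # second LSTM layer
y = model.l3(h2)                                        # logits over the output vocabulary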
Example #10
    def __init__(self):
        self.joint_state = np.zeros((3), dtype=np.float32)
        #  print self.joint_state
        self.action_num = 0
        self.reward = 0.0

        self.num_state = 0
        self.num_step = 0

        self.state = 0
        self.next_state = 0

        self.model = chainer.FunctionSet(
            l1=F.Linear(3, 120),
            l2=F.Linear(120, 80),
            l3=F.Linear(80, 40),
            l4=F.Linear(40, 27, initialW=np.zeros((27, 40), dtype=np.float32)),
        )
        if args.gpu >= 0:
            self.model.to_gpu()
        self.optimizer = optimizers.SGD()
        self.optimizer.setup(self.model)
        self.q_list = chainer.Variable(xp.zeros((1, 27), dtype=xp.float32))

        self.action = 0
        self.state = 0
        self.next_state = 0
        self.joint1 = self.init_state_joint1
        self.joint3 = self.init_state_joint3
        self.joint5 = self.init_state_joint5
        self.next_joint1 = self.init_state_joint1
        self.next_joint3 = self.init_state_joint3
        self.next_joint5 = self.init_state_joint5
Example #11
def create_model():
    model = chainer.FunctionSet(
        l1=F.Linear(4, pnum),
        l2=F.Linear(pnum, 3),
    )

    return model
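create_model only builds the parameter container; a full step also needs an optimizer and a forward pass. The sketch below shows one gradient update with the Chainer 1.x training idiom; pnum, the random batch, and the relu activation are assumptions made here for illustration.

import numpy as np
import chainer
import chainer.functions as F
from chainer import optimizers

pnum = 16  # placeholder hidden width; the original defines it elsewhere
model = chainer.FunctionSet(  # same layout as create_model()
    l1=F.Linear(4, pnum),
    l2=F.Linear(pnum, 3),
)
optimizer = optimizers.SGD(lr=0.01)
optimizer.setup(model)

x = chainer.Variable(np.random.rand(8, 4).astype(np.float32))      # 8 samples, 4 features
t = chainer.Variable(np.random.randint(0, 3, 8).astype(np.int32))  # 3 classes

h = F.relu(model.l1(x))
y = model.l2(h)
loss = F.softmax_cross_entropy(y, t)

optimizer.zero_grads()
loss.backward()
optimizer.update()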
Example #12
    def __init__(self, optimizer):
        self.model = chainer.FunctionSet(l=F.Linear(self.UNIT_NUM, 2))
        self.optimizer = optimizer
        # true parameters
        self.w = np.random.uniform(-1, 1,
                                   (self.UNIT_NUM, 1)).astype(np.float32)
        self.b = np.random.uniform(-1, 1, (1, )).astype(np.float32)
Example #13
    def __init__(self):
        self.joint_state = np.zeros((3), dtype=np.float32)
        #  print self.joint_state
        self.action_num = 0
        self.reward = 0.0

        self.num_step = 0
        self.num_episode = 0

        self.model = chainer.FunctionSet(
            l1=F.Linear(3, 1024),
            l2=F.Linear(1024, 512),
            l3=F.Linear(512, 256),
            l4=F.Linear(256, 128),
            l5=F.Linear(128, 64),
            l6=F.Linear(64, 27, initialW=np.zeros((27, 64), dtype=np.float32)),
        )
        if args.gpu >= 0:
            self.model.to_gpu()
        self.optimizer = optimizers.SGD()
        self.optimizer.setup(self.model)
        self.q_list = chainer.Variable(xp.zeros((1, 27), dtype=xp.float32))

        self.action = 0
        self.joint1 = self.init_state_joint1
        self.joint3 = self.init_state_joint3
        self.joint5 = self.init_state_joint5
        self.next_joint1 = self.init_state_joint1
        self.next_joint3 = self.init_state_joint3
        self.next_joint5 = self.init_state_joint5
Example #14
    def __init__(self, image):
        self.image = image
        self.CNNmodel = chainer.FunctionSet(conv1=L.Convolution2D(1, 64, 6),
                                            conv2=L.Convolution2D(64, 256, 6),
                                            conv3=L.Convolution2D(256, 512, 5),
                                            l1=L.Linear(18432, 8192),
                                            l2=L.Linear(8192, 6605))
Example #15
    def set_order(self, encl, decl):
        if len(encl) != len(decl):
            raise TypeError('Encode/Decode layers mismatch')

        self.depth = len(encl)

        for (el, dl) in zip(encl, reversed(decl)):
            self.sublayer.append(
                chainer.FunctionSet(enc=self.model[el], dec=self.model[dl]))
Example #16
def set_order(encl, decl):
    if len(encl) != len(decl):
        raise TypeError('Encode/Decode layers mismatch')

    #depth = len(encl)

    for (el, dl) in zip(encl, reversed(decl)):
        submodel = chainer.FunctionSet(enc=model[el], dec=model[dl])
        sublayer.append(submodel)
Example #17
def init_model(vocab_size):
    model = chainer.FunctionSet(
        embed=F.EmbedID(vocab_size, embed_units),
        hidden1=F.Linear(window * embed_units, hidden_units),
        output=F.Linear(hidden_units, label_num),
    )
    opt = optimizers.AdaGrad(lr=learning_rate)
    opt.setup(model)
    return model, opt
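In this model, hidden1 expects the embeddings of a whole window of words concatenated into one vector. A minimal forward sketch under assumed hyperparameters (embed_units, hidden_units, window, label_num, and the relu activation are placeholders here, since the originals are module-level globals not shown):

import numpy as np
import chainer
import chainer.functions as F

embed_units, hidden_units, window, label_num = 100, 50, 3, 5  # placeholders
vocab_size = 1000

model = chainer.FunctionSet(  # same layout as init_model()
    embed=F.EmbedID(vocab_size, embed_units),
    hidden1=F.Linear(window * embed_units, hidden_units),
    output=F.Linear(hidden_units, label_num),
)

# embed each word of a 3-word window, then concatenate along the feature axis
ids = [chainer.Variable(np.array([i], dtype=np.int32)) for i in (4, 7, 2)]
x = F.concat([model.embed(w) for w in ids])   # (1, window * embed_units)
h = F.relu(model.hidden1(x))
y = model.output(h)                           # (1, label_num) label scores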
Example #18
    def __init__(self,
                 emb_dim,
                 vocab_size,
                 layers,
                 suppress_output=False,
                 lstm=False,
                 irnn=False,
                 active=F.relu,
                 eos_id=0):
        """
        Recurrent Neural Network with multiple layers.
        in_dim -> layers[0] -> ... -> layers[-1] -> out_dim (optional)

        :param int emb_dim: dimension of embeddings
        :param int vocab_size: size of vocabulary
        :param layers: dimensions of hidden layers
        :type layers: list of int
        :param bool suppress_output: suppress output
        :param bool lstm: whether to use LSTM
        :param bool irnn: whether to use IRNN
        :param chainer.Function active: activation function between layers of vanilla RNN
        :param int eos_id: ID of <BOS> and <EOS>
        """
        assert not (lstm and irnn)

        self.emb_dim = emb_dim
        self.vocab_size = vocab_size
        self.layers = layers
        self.suppress_output = suppress_output
        self.lstm = lstm
        self.irnn = irnn
        self.active = active
        self.eos_id = eos_id

        # set up NN architecture
        model = chainer.FunctionSet(emb=F.EmbedID(vocab_size, emb_dim), )
        # add hidden layers
        layer_dims = [emb_dim] + layers
        for i in range(len(layers)):
            in_dim = layer_dims[i]
            out_dim = layer_dims[i + 1]
            if lstm:
                linear = F.Linear(in_dim, out_dim * 4)
                hidden = F.Linear(out_dim, out_dim * 4)
            else:
                linear = F.Linear(in_dim, out_dim)
                hidden = F.Linear(out_dim, out_dim)
                if irnn:
                    # initialize hidden connection with identity matrix
                    hidden.W = np.eye(out_dim)
            setattr(model, 'l{}_x'.format(i + 1), linear)
            setattr(model, 'l{}_h'.format(i + 1), hidden)
        if not suppress_output:
            # add output layer
            setattr(model, 'l_y', F.Linear(layer_dims[-1], vocab_size))
        self.model = model
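The constructor only attaches the layers with setattr; the class's step/forward logic is outside this excerpt. A rough sketch of how the attached l{i}_x / l{i}_h pairs could be applied for one time step of a single plain-RNN layer (lstm=False, active=F.relu), with placeholder sizes:

import numpy as np
import chainer
import chainer.functions as F

emb_dim, hidden_dim, vocab_size = 64, 128, 500  # placeholders
model = chainer.FunctionSet(emb=F.EmbedID(vocab_size, emb_dim))
setattr(model, 'l1_x', F.Linear(emb_dim, hidden_dim))
setattr(model, 'l1_h', F.Linear(hidden_dim, hidden_dim))
setattr(model, 'l_y', F.Linear(hidden_dim, vocab_size))

h = chainer.Variable(np.zeros((1, hidden_dim), dtype=np.float32))  # previous hidden state
word = chainer.Variable(np.array([7], dtype=np.int32))

x = model.emb(word)
h = F.relu(model.l1_x(x) + model.l1_h(h))  # one recurrent step
y = model.l_y(h)                           # logits over the vocabulary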
Example #19
def main():

    global par
    par = 100

    dn = 10000

    global x_e, h1_e, h2_e, h3_e, h4_e, y_e
    k = int(dn / par)
    x_e = np.zeros(k)
    h1_e = np.zeros(k)
    h2_e = np.zeros(k)
    h3_e = np.zeros(k)
    h4_e = np.zeros(k)
    y_e = np.zeros(k)

    model = chainer.FunctionSet(conv1=chainer.links.Convolution2D(1, 32, 5),
                                conv2=chainer.links.Convolution2D(32, 64, 4),
                                conv3=chainer.links.Convolution2D(64, 128, 3),
                                fully1=F.Linear(512, 256),
                                fully2=F.Linear(256, 10))

    optimizer = optimizers.SGD()
    optimizer.setup(model)

    for i in range(dn):

        print i

        data = mnist.img()
        label = mnist.label()

        x = chainer.Variable(data.reshape(1, 1, 28, 28).astype(np.float32))
        t = chainer.Variable(np.array([label]).reshape(1).astype(np.int32))
        y = forward(model, x, i)

        optimizer.zero_grads()
        loss = F.softmax_cross_entropy(y, t)
        loss.backward()
        optimizer.update()

    print "plotting..."

    x = (np.arange(k) + 1) * par
    plt.plot(x, x_e / par, label="x")
    plt.plot(x, h1_e / par, label="h1")
    plt.plot(x, h2_e / par, label="h2")
    plt.plot(x, h3_e / par, label="h3")
    plt.plot(x, h4_e / par, label="h4")
    plt.plot(x, y_e / par, label="y")

    plt.legend(loc="lower left")
    plt.xlabel("learned data")
    plt.ylabel("entropy")
    plt.savefig("./fig01.png")
    plt.show()
Example #20
def make_model():
    model = chainer.FunctionSet(conv1=F.Convolution2D(3, 32, 2, pad=1),
        conv2=F.Convolution2D(32, 32, 2, pad=1),
        conv3=F.Convolution2D(32, 32, 2, pad=1),
        conv4=F.Convolution2D(32, 32, 2, pad=1),
        conv5=F.Convolution2D(32, 32, 2, pad=1),
        conv6=F.Convolution2D(32, 32, 2, pad=1),
        l1=F.Linear(512, 512),
        l2=F.Linear(512, 10))
    return model
Example #21
    def setUp(self):
        self.fs = chainer.FunctionSet(a=L.Linear(3, 2), b=L.Linear(3, 2))
        self.aW = self.fs.a.W.data
        self.ab = self.fs.a.b.data
        self.bW = self.fs.b.W.data
        self.bb = self.fs.b.b.data

        self.agW = self.fs.a.W.grad
        self.agb = self.fs.a.b.grad
        self.bgW = self.fs.b.W.grad
        self.bgb = self.fs.b.b.grad
Example #22
    def defineModel(self):
        lstm_unit = self.unit_num
        self.model = chainer.FunctionSet(
            conv=F.Convolution2D(self.input_num,
                                 self.hidden_num, (self.k_w, self.k_h),
                                 pad=0),
            line1=F.Linear(self.unit_num, lstm_unit),
            lstm=L.LSTM(lstm_unit, lstm_unit),
            line2=F.Linear(lstm_unit, self.unit_num),
            deconv=L.Deconvolution2D(self.hidden_num,
                                     self.output_num, (self.k_w, self.k_h),
                                     pad=0))
Example #23
    def __init__(self, encoder, decoder):
        """
        :param rnn.Rnn encoder: encoder RNN
        :param rnn.Rnn decoder: decoder RNN
        :return:
        """
        assert encoder.suppress_output
        assert not decoder.suppress_output

        self.encoder = encoder
        self.decoder = decoder
        self.model = chainer.FunctionSet(encoder=encoder.model,
                                         decoder=decoder.model)
Example #24
    def setUp(self):
        self.fs = chainer.FunctionSet(
            a=F.Linear(3, 2),
            b=F.Linear(3, 2)
        )
        self.aW = self.fs.a.W
        self.ab = self.fs.a.b
        self.bW = self.fs.b.W
        self.bb = self.fs.b.b

        self.agW = self.fs.a.gW
        self.agb = self.fs.a.gb
        self.bgW = self.fs.b.gW
        self.bgb = self.fs.b.gb
Example #25
def init_model(args):
    input_layer = 36
    ans_num = 3
    model = chainer.FunctionSet(
        w1=F.Linear(input_layer, args.hidden),  #source input
        w2=F.Linear(args.hidden, args.hidden),  #hidden
        w3=F.Linear(args.hidden, ans_num),  #target output
    )
    optimizer = optimizers.SGD(lr=0.01)  # (lr=opt_score)  # lr=0.01
    #optimizer = optimizers.AdaGrad(lr=0.001)
    #optimizer = optimizers.AdaDelta(rho=0.9)
    #optimizer = optimizers.Adam(alpha=0.0001)
    optimizer.setup(model)  # .collect_parameters()
    return model, optimizer
Example #26
def rnn(io_size, fast_hidden_size, slow_hidden_size):
    model = chainer.FunctionSet(x_to_fh=F.Linear(io_size, fast_hidden_size),
                                fh_to_fh=F.Linear(fast_hidden_size,
                                                  fast_hidden_size),
                                fh_to_sh=F.Linear(fast_hidden_size,
                                                  slow_hidden_size),
                                sh_to_fh=F.Linear(slow_hidden_size,
                                                  fast_hidden_size),
                                sh_to_sh=F.Linear(slow_hidden_size,
                                                  slow_hidden_size),
                                fh_to_y=F.Linear(fast_hidden_size, io_size))
    for param in model.parameters:
        param[:] = np.random.uniform(-0.1, 0.1, param.shape)
    return model
Example #27
    def __init__(self):
        self.joint_state = np.zeros((3), dtype=np.float32)
        self.action_num = 0
        self.reward = 0.0

        self.target_point = PointCloud()
        self.target_init_y = 0.000
        #  self.target_init_x = 0.680
        self.target_init_x = math.sqrt(self.L_2**2 -
                                       self.target_init_y**2) + 0.270
        self.target_init_z = 0.900

        self.num_step = 0
        self.num_episode = 0

        self.model = chainer.FunctionSet(
            l1=F.Linear(6, 2560),
            l2=F.Linear(2560, 1280),
            l3=F.Linear(1280, 640),
            l4=F.Linear(640, 320),
            l5=F.Linear(320, 160),
            l6=F.Linear(160, 80),
            l7=F.Linear(80, 27, initialW=np.zeros((27, 80), dtype=np.float32)),
        )
        self.model_target = copy.deepcopy(self.model)
        if args.gpu >= 0:
            self.model.to_gpu()
            self.model_target.to_gpu()

        self.optimizer = optimizers.SGD(self.ALPHA)
        self.optimizer.setup(self.model)
        self.q_list = chainer.Variable(xp.zeros((1, 27), dtype=xp.float32))

        self.action = 0
        self.joint1 = self.init_joint1
        self.joint2 = self.init_joint2
        self.joint3 = self.init_joint3
        self.next_joint1 = self.init_next_joint1
        self.next_joint2 = self.init_next_joint2
        self.next_joint3 = self.init_next_joint3

        # history data : D = [s(joint1, joint2, joint3, goal_x, goal_y, goal_z), a, reward, s_dash(joint1', joint2', joint3', x_goal, y_goal, z_goal), end_episode_flag]
        self.D = [
            xp.zeros((self.data_size, 1, 6), dtype=xp.float32),
            xp.zeros(self.data_size, dtype=xp.uint8),
            xp.zeros((self.data_size, 1), dtype=xp.float32),
            xp.zeros((self.data_size, 1, 6), dtype=xp.float32),
            xp.zeros((self.data_size, 1), dtype=np.bool)
        ]
Example #28
    def _create_and_initialize_model(self, tags, vocab):
        # The model is feed-forward LSTM:
        # (word_id -> word_emb)_t -> LSTM -> (distribution over tag_id)_t

        self.model = chainer.FunctionSet()
        self.model.embed = F.EmbedID(len(vocab), self.n_lstm_cells)
        self.model.lstm_x_to_h = F.Linear(self.n_lstm_cells,
                                          4 * self.n_lstm_cells)
        self.model.lstm_h_to_h = F.Linear(self.n_lstm_cells,
                                          4 * self.n_lstm_cells)
        self.model.yclf = F.Linear(self.n_lstm_cells, len(tags))

        # Randomly initialize the parameters.
        for param in self.model.parameters:
            param[:] = np.random.uniform(-0.1, 0.1, param.shape)
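One forward step of this tagger feeds the embedded word through the x-to-h and h-to-h projections and the stateless LSTM function, then classifies the hidden state. A minimal sketch, with n_lstm_cells and the vocabulary/tag sizes as placeholders:

import numpy as np
import chainer
import chainer.functions as F

n_lstm_cells, n_vocab, n_tags = 100, 500, 10  # placeholders
model = chainer.FunctionSet(
    embed=F.EmbedID(n_vocab, n_lstm_cells),
    lstm_x_to_h=F.Linear(n_lstm_cells, 4 * n_lstm_cells),
    lstm_h_to_h=F.Linear(n_lstm_cells, 4 * n_lstm_cells),
    yclf=F.Linear(n_lstm_cells, n_tags),
)

c = chainer.Variable(np.zeros((1, n_lstm_cells), dtype=np.float32))  # cell state
h = chainer.Variable(np.zeros((1, n_lstm_cells), dtype=np.float32))  # hidden state

word_id = chainer.Variable(np.array([12], dtype=np.int32))
x = model.embed(word_id)
c, h = F.lstm(c, model.lstm_x_to_h(x) + model.lstm_h_to_h(h))
tag_scores = model.yclf(h)  # pre-softmax scores over tag IDs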
Example #29
def init_model(vocab_size, char_type_size):
    model = chainer.FunctionSet(
        embed=F.EmbedID(vocab_size, embed_units),
        char_type_embed=F.EmbedID(char_type_size, char_type_embed_units),
        hidden1=F.Linear(window * (embed_units + char_type_embed_units) + hidden_units, hidden_units),
        i_gate=F.Linear(window * (embed_units + char_type_embed_units) + hidden_units, hidden_units),
        f_gate=F.Linear(window * (embed_units + char_type_embed_units) + hidden_units, hidden_units),
        o_gate=F.Linear(window * (embed_units + char_type_embed_units) + hidden_units, hidden_units),
        output=F.Linear(hidden_units, label_num),
    )
    #opt = optimizers.AdaGrad(lr=learning_rate)
    #opt = optimizers.SGD()
    opt = optimizers.Adam()
    opt.setup(model)
    return model, opt
Example #30
    def __init__(self):
        self.joint_state = np.zeros((3), dtype=np.float32)
        #  print self.joint_state
        self.action_num = 0
        self.reward = 0.0

        self.target_point = PointCloud()
        self.target_init_x = 0.91
        self.target_init_y = 0.00
        self.target_init_z = 0.84

        self.num_state = 0
        self.num_step = 0
        self.num_episode = 0

        self.state = 0
        self.next_state = 0

        self.model = chainer.FunctionSet(
            l1=F.Linear(6, 1536),
            l2=F.Linear(1536, 768),
            l3=F.Linear(768, 384),
            l4=F.Linear(384, 192),
            l5=F.Linear(192, 96),
            l6=F.Linear(96, 27, initialW=np.zeros((27, 96), dtype=np.float32)),
        )
        if args.gpu >= 0:
            self.model.to_gpu()

        #  self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
        self.optimizer = optimizers.SGD()
        self.optimizer.setup(self.model)
        self.q_list = chainer.Variable(xp.zeros((1, 27), dtype=xp.float32))

        self.action = 0
        self.state = 0
        self.next_state = 0
        self.joint1 = self.init_state_joint1
        self.joint3 = self.init_state_joint3
        self.joint5 = self.init_state_joint5
        self.next_joint1 = self.init_state_joint1
        self.next_joint3 = self.init_state_joint3
        self.next_joint5 = self.init_state_joint5