Example #1
def set_model(feature_dimension_size, param, output_units):
    """
    NNのモデルの設定
    @param feature_dimension_size: 入力特徴量の次元数
    @param param: パラメータオブジェクト  
    @param output_units: 出力層のユニット数 

    @return: ネットワークモデル
    """
    if param.HIDDEN_LAYER_NUM == 1:
        # 3-layer perceptron (input layer, hidden layer 1, output layer)
        model = FunctionSet(l1=F.Linear(feature_dimension_size,
                                        param.NODE_NUM_1LAYER),
                            l2=F.Linear(param.NODE_NUM_1LAYER, output_units))
    elif param.HIDDEN_LAYER_NUM == 2:
        # 4-layer perceptron (input layer, hidden layers 1-2, output layer)
        model = FunctionSet(l1=F.Linear(feature_dimension_size,
                                        param.NODE_NUM_1LAYER),
                            l2=F.Linear(param.NODE_NUM_1LAYER,
                                        param.NODE_NUM_2LAYER),
                            l3=F.Linear(param.NODE_NUM_2LAYER, output_units))
    elif param.HIDDEN_LAYER_NUM == 3:
        # 5-layer perceptron (input layer, hidden layers 1-3, output layer)
        model = FunctionSet(l1=F.Linear(feature_dimension_size,
                                        param.NODE_NUM_1LAYER),
                            l2=F.Linear(param.NODE_NUM_1LAYER,
                                        param.NODE_NUM_2LAYER),
                            l3=F.Linear(param.NODE_NUM_2LAYER,
                                        param.NODE_NUM_3LAYER),
                            l4=F.Linear(param.NODE_NUM_3LAYER, output_units))
    return model
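A minimal usage sketch for set_model, assuming the Chainer 1.x API that these examples use; the Param container below is hypothetical and only mirrors the attribute names the function reads:

import numpy as np
from chainer import Variable, optimizers
import chainer.functions as F

class Param(object):
    # hypothetical hyperparameter container
    HIDDEN_LAYER_NUM = 2
    NODE_NUM_1LAYER = 100
    NODE_NUM_2LAYER = 50

model = set_model(feature_dimension_size=32, param=Param(), output_units=3)
optimizer = optimizers.Adam()
optimizer.setup(model.collect_parameters())

x = Variable(np.zeros((1, 32), dtype=np.float32))
h = F.relu(model.l1(x))   # each FunctionSet attribute is a parameterized function
h = F.relu(model.l2(h))
y = model.l3(h)           # HIDDEN_LAYER_NUM == 2, so the output layer is l3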
Example #2
 def __init__(self, nnpacker, logging=False):
     self.nnpacker = nnpacker
     model = FunctionSet(**nnpacker.getFunctions())
     optimizer = optimizers.SGD()
     lossFunction = F.softmax_cross_entropy
     params = {'epoch': 20, 'batchsize': 100, 'logging': logging}
     NNmanager.__init__(self, model, optimizer, lossFunction, **params)
Example #3
 def __init__(self, optimizer):
     self.model = FunctionSet(l=Linear(self.UNIT_NUM, 2))
     self.optimizer = optimizer
     # true parameters
     self.w = np.random.uniform(-1, 1,
                                (self.UNIT_NUM, 1)).astype(np.float32)
     self.b = np.random.uniform(-1, 1, (1, )).astype(np.float32)
Example #4
    def __init__(self, input_vector_length, enable_controller=[0, 1, 2]):
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Pong"
        self.input_vector_length = input_vector_length

        print "Initializing DQN..."
#   Initialization for Chainer 1.1.0 or older.
#        print "CUDA init"
#        cuda.init()
        
        # inputs --> 5 * 14 (with 10 temporal steps) + 5 (from the last hour) + 5 (from the last 24 hours)
        print "Model Building"
        self.model = FunctionSet(
            l1=F.Linear(input_vector_length, 500),
            l2=F.Linear(500, 250),
            l3=F.Linear(250, 80),
            q_value=F.Linear(80, self.num_of_actions,
                             initialW=np.zeros((self.num_of_actions, 80),
                                               dtype=np.float32))
        ).to_gpu()

        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.0002, alpha=0.3, momentum=0.2)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.D = [np.zeros((self.data_size, self.input_vector_length), dtype=np.uint8),
                  np.zeros(self.data_size, dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.int8),
                  np.zeros((self.data_size, self.input_vector_length), dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.bool)]
Example #5
    def __init__(self):
        self.model0 = FunctionSet(
            l=F.Convolution2D(1, 1, 5, stride=1, pad=2, nobias=True))
        self.model1 = FunctionSet(
            l=F.Convolution2D(1, 1, 5, stride=1, pad=2, nobias=True))
        #print self.model.l.W.shape

        self.model0.l.W[0, 0, :, :] = np.array(
            [[0, 0, 2, 0, 0], [0, 0, -12, 0, 0], [0, 0, 6, 0, 0],
             [0, 0, 4, 0, 0], [0, 0, 0, 0, 0]]).astype(np.float32) / 12.0
        self.model1.l.W[0, 0, :, :] = np.array(
            [[0, 0, 0, 0, 0], [0, 0, -4, 0, 0], [0, 0, -6, 0, 0],
             [0, 0, 12, 0, 0], [0, 0, -2, 0, 0]]).astype(np.float32) / 12.0
        #print self.model.l.W.shape
        self.model0.to_gpu()
        self.model1.to_gpu()
Example #6
    def __init__(self, use_gpu, enable_controller, dim):
        self.use_gpu = use_gpu
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller
        self.dim = dim

        print("Initializing Q-Network...")
        print("Input Dim of Q-Network : ",self.dim*self.hist_size)

        hidden_dim = 256
        self.model = FunctionSet(
            l4=F.Linear(self.dim*self.hist_size, hidden_dim, wscale=np.sqrt(2)),
            l5=F.Linear(hidden_dim,hidden_dim,wscale=np.sqrt(2)),
            q_value=F.Linear(hidden_dim, self.num_of_actions,
                    initialW=np.zeros((self.num_of_actions, hidden_dim),
                    dtype=np.float32))
        )

        if self.use_gpu >= 0:
            self.model.to_gpu()

        self.model_target = copy.deepcopy(self.model)

        self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.d = [np.zeros((self.data_size, self.hist_size, self.dim),
                    dtype=np.uint8),
                  np.zeros(self.data_size, dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.int8),
                  np.zeros((self.data_size, self.hist_size, self.dim),
                    dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.bool)]
Example #7
 def __init__(self):
     n_in = 28 * 28
     n_hidden = 100
     self.model = FunctionSet(encode=F.Linear(n_in, n_hidden),
                              decode=F.Linear(n_hidden, n_in))
     self.optimizer = optimizers.Adam()
     self.optimizer.setup(self.model.collect_parameters())
Example #8
    def __init__(self, enable_controller=[0, 3, 4]):
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Pong"

        print "Initializing DQN..."
        #	Initialization of Chainer 1.1.0 or older.
        #        print "CUDA init"
        #        cuda.init()

        print "Model Building"
        self.model = FunctionSet(
            l1=convlstm_link.CONVLSTM(7056, 7056),
            l4=F.Linear(7056, 512, wscale=np.sqrt(2)),
            q_value=F.Linear(512,
                             self.num_of_actions,
                             initialW=np.zeros((self.num_of_actions, 512),
                                               dtype=np.float32))).to_gpu()

        self.model_target = copy.deepcopy(self.model)

        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.00025,
                                                  alpha=0.95,
                                                  momentum=0.95,
                                                  eps=0.0001)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.D = [
            np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
            np.zeros(self.data_size, dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.int8),
            np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.bool)
        ]
Example #9
    def __init__(self,
                 data=None,
                 target=None,
                 n_inputs=784,
                 n_hidden=784,
                 n_outputs=10,
                 gpu=-1):
        self.excludes.append('xp')
        self.model = FunctionSet(l1=F.Linear(n_inputs, n_hidden),
                                 l2=F.Linear(n_hidden, n_hidden),
                                 l3=F.Linear(n_hidden, n_outputs))

        if gpu >= 0:
            self.model.to_gpu()
            self.xp = cuda.cupy
        else:
            self.xp = np

        if data is not None:
            self.x_train, self.x_test = data
        else:
            self.x_train, self.x_test = None, None

        if target is not None:
            self.y_train, self.y_test = target
            self.n_train = len(self.y_train)
            self.n_test = len(self.y_test)
        else:
            self.y_train, self.y_test = None, None
            self.n_train = 0
            self.n_test = 0

        self.gpu = gpu
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
Example #10
def init_model(model_params):
    wscale1 = model_params.wscale1  # math.sqrt(5 * 5 * 3) * 0.0001
    wscale2 = model_params.wscale2  # math.sqrt(5 * 5 * 32) * 0.01
    wscale3 = model_params.wscale3  # math.sqrt(5 * 5 * 32) * 0.01
    wscale4 = model_params.wscale4  # math.sqrt(576) * 0.1
    wscale5 = model_params.wscale5  # math.sqrt(64) * 0.1
    # wscale1, wscale2, wscale3, wscale4, wscale5 = [math.sqrt(2)] * 5
    model = FunctionSet(conv1=F.Convolution2D(3,
                                              32,
                                              5,
                                              wscale=wscale1,
                                              stride=1,
                                              pad=2),
                        conv2=F.Convolution2D(32,
                                              32,
                                              5,
                                              wscale=wscale2,
                                              stride=1,
                                              pad=2),
                        conv3=F.Convolution2D(32,
                                              64,
                                              5,
                                              wscale=wscale3,
                                              stride=1,
                                              pad=2),
                        fl4=F.Linear(576, 64, wscale=wscale4),
                        fl5=F.Linear(64, 10, wscale=wscale5))
    if params.gpu_flag:
        model.to_gpu()
    return model
Example #11
 def __init__(self,n_act):
   N_output = n_act
   self.model = FunctionSet(
     conv1=F.Convolution2D(1, 16, 3, pad=1),
     conv2=F.Convolution2D(16, 16, 3, pad=1),
     l1=F.Linear(256, 256),
     l2=F.Linear(256, N_output))
Example #12
def init_model(vocab_size, char_type_size):
    model = FunctionSet(
        embed=F.EmbedID(vocab_size, embed_units),
        char_type_embed=F.EmbedID(char_type_size, char_type_embed_units),
        #dict_embed = F.Linear(12, dict_embed_units),
        hidden1=F.Linear(
            window * (embed_units + char_type_embed_units) * 3 + hidden_units,
            hidden_units),
        i_gate=F.Linear(
            window * (embed_units + char_type_embed_units) * 3 + hidden_units,
            hidden_units),
        f_gate=F.Linear(
            window * (embed_units + char_type_embed_units) * 3 + hidden_units,
            hidden_units),
        o_gate=F.Linear(
            window * (embed_units + char_type_embed_units) * 3 + hidden_units,
            hidden_units),
        output=F.Linear(hidden_units + 12, label_num),
    )
    if opt_selection == 'Adagrad':
        opt = optimizers.AdaGrad(lr=learning_rate)
    elif opt_selection == 'SGD':
        opt = optimizers.SGD()
    elif opt_selection == 'Adam':
        opt = optimizers.Adam()
    else:
        opt = optimizers.AdaGrad(lr=learning_rate)
        print('AdaGrad is chosen as default')
    opt.setup(model)
    return model, opt
Example #13
    def __init__(self,
                 rng,
                 data,
                 n_inputs=784,
                 n_hidden=784,
                 corruption_level=0.3,
                 gpu=-1,
                 sparse=False):
        """Denoising AutoEncoder
        data: data for train
        n_inputs: a number of units of input layer and output layer
        n_hidden: a number of units of hidden layer
        corruption_level: a ratio of masking noise
        """

        self.model = FunctionSet(encoder=F.Linear(n_inputs, n_hidden),
                                 decoder=F.Linear(n_hidden, n_inputs))

        if gpu >= 0:
            self.model.to_gpu()

        self.gpu = gpu

        self.x_train, self.x_test = data

        self.n_train = len(self.x_train)
        self.n_test = len(self.x_test)

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())
        self.corruption_level = corruption_level
        self.rng = rng
        self.sparse = sparse
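A minimal instantiation sketch for the denoising autoencoder above; the class name DenoisingAutoEncoder, the random placeholder data, and the train/test split are assumptions for illustration only:

import numpy as np

rng = np.random.RandomState(0)
x_all = rng.rand(1000, 784).astype(np.float32)   # placeholder data, not a real dataset
data = (x_all[:900], x_all[900:])                # (x_train, x_test)

dae = DenoisingAutoEncoder(rng, data,
                           n_inputs=784, n_hidden=784,
                           corruption_level=0.3, gpu=-1, sparse=False)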
Example #14
def main():
    if P.use_mean_var:
        conv6_output = 126
    else:
        conv6_output = 128

    if P.model_name is None:
        model = FunctionSet(conv1=F.Convolution2D(1, 128, 3, stride=1),
                            conv2=F.Convolution2D(128, 128, 3, stride=1),
                            conv3=F.Convolution2D(128, 128, 3, stride=1),
                            conv4=F.Convolution2D(128, 128, 3, stride=1),
                            conv5=F.Convolution2D(128, 128, 3, stride=1),
                            conv6=F.Convolution2D(128,
                                                  conv6_output,
                                                  3,
                                                  stride=1),
                            conv7=F.Convolution2D(128, 128, 1, stride=1),
                            conv8=F.Convolution2D(128, 1, 1, stride=1))
        if P.gpu >= 0:
            cuda.init(P.gpu)
            model.to_gpu()
    else:
        if P.gpu >= 0:
            cuda.init(P.gpu)
        model = pickle.load(open(os.path.join(P.model_dir, P.model_name),
                                 'rb'))

    optimizer = optimizers.MomentumSGD(lr=P.lr, momentum=P.momentum)
    optimizer.setup(model.collect_parameters())

    train(model, optimizer)
    return
Example #15
    def __init__(self, enable_controller=[0, 3, 4]):
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Pong"

        print "Initializing DQN..."
        print "CUDA init"
        cuda.init()

        print "Model Building"
        self.model = FunctionSet(
            l1=F.Convolution2D(4, 16, ksize=8, stride=4, wscale=np.sqrt(2)),
            l2=F.Convolution2D(16, 32, ksize=4, stride=2, wscale=np.sqrt(2)),
            l3=F.Linear(2592, 256),
            q_value=F.Linear(256,
                             self.num_of_actions,
                             initialW=np.zeros((self.num_of_actions, 256),
                                               dtype=np.float32))).to_gpu()

        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.0002,
                                                  alpha=0.3,
                                                  momentum=0.2)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.D = [
            np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
            np.zeros(self.data_size, dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.int8),
            np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.bool)
        ]
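A hedged sketch of how one transition could be written into the replay arrays D described by the comment above; the method name, the index argument, and the ring-buffer indexing are assumptions, not part of the original class:

    def store_transition_sketch(self, index, s, a, r, s_dash, episode_end):
        """Hypothetical helper: write one transition into the replay arrays."""
        i = index % self.data_size          # ring-buffer style indexing (assumption)
        self.D[0][i] = s                    # state, uint8 array of shape (4, 84, 84)
        self.D[1][i] = a                    # action index
        self.D[2][i] = r                    # reward
        self.D[3][i] = s_dash               # next state
        self.D[4][i] = episode_end          # episode-termination flag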
Example #16
    def __init__(self):
        self.model = FunctionSet(l1=F.Convolution2D(4,
                                                    32,
                                                    ksize=8,
                                                    stride=4,
                                                    nobias=False,
                                                    wscale=np.sqrt(2)),
                                 l2=F.Convolution2D(32,
                                                    64,
                                                    ksize=4,
                                                    stride=2,
                                                    nobias=False,
                                                    wscale=np.sqrt(2)),
                                 l3=F.Convolution2D(64,
                                                    64,
                                                    ksize=3,
                                                    stride=1,
                                                    nobias=False,
                                                    wscale=np.sqrt(2)))

        self.model.l1.W = np.load('elite/l1_W.npy')
        self.model.l1.b = np.load('elite/l1_b.npy')
        self.model.l2.W = np.load('elite/l2_W.npy')
        self.model.l2.b = np.load('elite/l2_b.npy')
        self.model.l3.W = np.load('elite/l3_W.npy')
        self.model.l3.b = np.load('elite/l3_b.npy')
Example #17
 def CreateNNs(self):
   assert(len(self.Options['n_units'])>=2)
   assert(self.Options['n_units_err'] is None or len(self.Options['n_units_err'])>=2)
   #Mean model
   n_units= self.Options['n_units']
   self.f_names= ['l%d'%i for i in range(len(n_units)-1)]
   funcs= {}
   for i in range(len(n_units)-1):
     funcs[self.f_names[i]]= F.Linear(n_units[i],n_units[i+1])
   self.model= FunctionSet(**funcs)
   #Error model
   if self.Options['n_units_err'] is not None: n_units = self.Options['n_units_err']
   self.f_names_err= ['l%d'%i for i in range(len(n_units)-1)]
   funcs= {}
   for i in range(len(n_units)-1):
     funcs[self.f_names_err[i]]= F.Linear(n_units[i],n_units[i+1])
   self.model_err= FunctionSet(**funcs)
Example #18
 def __init__(self, n_in, n_hidden, n_epoch=20, batchsize=100, use_cuda=False):
     super().__init__(n_epoch, batchsize, use_cuda)
     
     self.model = FunctionSet(
         encode=F.Linear(n_in, n_hidden),
         decode=F.Linear(n_hidden, n_in)
     )
     self.registModel()
Example #19
 def __init__(self, num_inputs, num_units, dropout_ratio, corruption_level,
              optimizer, gpu):
     model = FunctionSet(layer1=F.Linear(num_inputs, num_units),
                         layer2=F.Linear(num_units, num_units),
                          layer3=F.Linear(num_units, 1))  # output for regression
     self.layers = [model.layer1, model.layer2, model.layer3]
     super(MLP, self).__init__(model, optimizer, dropout_ratio,
                               corruption_level, gpu)
Example #20
    def __init__(self,
                 in_channels,
                 out1,
                 proj3,
                 out3,
                 proj33,
                 out33,
                 pooltype,
                 proj_pool=None,
                 stride=1):
        if out1 > 0:
            assert stride == 1
            assert proj_pool is not None

        self.f = FunctionSet(
            proj3=F.Convolution2D(in_channels, proj3, 1, nobias=True),
            conv3=F.Convolution2D(proj3,
                                  out3,
                                  3,
                                  pad=1,
                                  stride=stride,
                                  nobias=True),
            proj33=F.Convolution2D(in_channels, proj33, 1, nobias=True),
            conv33a=F.Convolution2D(proj33, out33, 3, pad=1, nobias=True),
            conv33b=F.Convolution2D(out33,
                                    out33,
                                    3,
                                    pad=1,
                                    stride=stride,
                                    nobias=True),
            proj3n=F.BatchNormalization(proj3),
            conv3n=F.BatchNormalization(out3),
            proj33n=F.BatchNormalization(proj33),
            conv33an=F.BatchNormalization(out33),
            conv33bn=F.BatchNormalization(out33),
        )

        if out1 > 0:
            self.f.conv1 = F.Convolution2D(in_channels,
                                           out1,
                                           1,
                                           stride=stride,
                                           nobias=True)
            self.f.conv1n = F.BatchNormalization(out1)

        if proj_pool is not None:
            self.f.poolp = F.Convolution2D(in_channels,
                                           proj_pool,
                                           1,
                                           nobias=True)
            self.f.poolpn = F.BatchNormalization(proj_pool)

        if pooltype == 'max':
            self.f.pool = MaxPooling2D(3, stride=stride, pad=1)
        elif pooltype == 'avg':
            self.f.pool = AveragePooling2D(3, stride=stride, pad=1)
        else:
            raise NotImplementedError()
Example #21
 def CreateNNs(self):
     assert (len(self.Options['n_units']) >= 2)
     #Mean model
     n_units = self.Options['n_units']
     self.f_names = ['l%d' % i for i in range(len(n_units) - 1)]
     funcs = {}
     for i in range(len(n_units) - 1):
         funcs[self.f_names[i]] = F.Linear(n_units[i], n_units[i + 1])
     self.model = FunctionSet(**funcs)
Example #22
    def __init__(self, use_gpu, enable_controller, dim):
        self.use_gpu = use_gpu
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller
        self.dim = dim

        print("Initializing Q-Network...")

        hidden_dim1 = 64
        #hidden_dim1 = 32
        hidden_dim2 = 128
        hidden_dim3 = 10
        hidden_cont = 100

        self.model = FunctionSet(
            l4=linearL4_link.LinearL4_link(self.dim * self.hist_size *
                                           self.time_M,
                                           hidden_cont,
                                           wscale=np.sqrt(2)),
            l5=MU_l6.memory_unit_link(self.dim * self.hist_size * self.time_M,
                                      hidden_dim3 * hidden_cont,
                                      wscale=np.sqrt(2)),
            l6=MU_l6.memory_unit_link(self.dim * self.hist_size * self.time_M,
                                      hidden_dim3 * hidden_cont,
                                      wscale=np.sqrt(2)),
            l7=attention.Attention(hidden_cont, hidden_dim3 * hidden_cont,
                                   hidden_dim3),
            l8=retrieval.Retrieval(hidden_dim3, hidden_dim3 * hidden_cont,
                                   hidden_cont),
            l9=F.Bilinear(hidden_cont, hidden_cont, hidden_dim2),
            q_value=F.Linear(hidden_dim2,
                             self.num_of_actions,
                             initialW=np.zeros(
                                 (self.num_of_actions, hidden_dim2),
                                 dtype=np.float32)))
        if self.use_gpu >= 0:
            self.model.to_gpu()

        self.model_target = copy.deepcopy(self.model)

        self.optimizer = optimizers.RMSpropGraves(lr=0.00025,
                                                  alpha=0.95,
                                                  momentum=0.95,
                                                  eps=0.0001)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s(now & 10history), a, r, s_dash, end_episode_flag]
        # modified to MQN
        self.d = [
            np.zeros((self.data_size, self.hist_size * self.time_M, self.dim),
                     dtype=np.uint8),
            np.zeros(self.data_size, dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.int8),
            np.zeros((self.data_size, self.hist_size, self.dim),
                     dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.bool)
        ]
Example #23
 def __init__(self, input_size=32):
     super(CNN3_Model, self).__init__()
     # F.Convolution2D(in_channel, out_channel, filter_size)
     self.model = FunctionSet(  # 1*32*32 -(conv)-> 20*28*28 -(pool)-> 20*14*14
         conv1=F.Convolution2D(1, 20, 5),
         # 20*14*14 -(conv)-> 50*10*10 -(pool)-> 50*5*5=1250
         conv2=F.Convolution2D(20, 50, 5),
         l1=F.Linear(1250, 300),
         l2=F.Linear(300, 2))
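A hedged sketch of the forward pass implied by the shape comments above; the 2x2 max pooling and the relu activations are assumptions, while the filter and layer sizes come from the convolutions and linear layers defined in the snippet:

def forward_sketch(model, x):
    # x: Variable of shape (batch, 1, 32, 32), dtype float32
    h = F.max_pooling_2d(F.relu(model.conv1(x)), 2)  # 1*32*32 -> 20*28*28 -> 20*14*14
    h = F.max_pooling_2d(F.relu(model.conv2(h)), 2)  # 20*14*14 -> 50*10*10 -> 50*5*5 = 1250
    h = F.relu(model.l1(h))                          # 1250 -> 300
    return model.l2(h)                               # 300 -> 2 (class scores)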
Example #24
    def __init__(self, logging=False):
        model = FunctionSet(l1=F.Linear(784, 100),
                            l2=F.Linear(100, 100),
                            l3=F.Linear(100, 10))

        optimizer = optimizers.SGD()
        lossFunction = F.softmax_cross_entropy
        params = {'epoch': 20, 'batchsize': 100, 'logging': logging}
        NNmanager.__init__(self, model, optimizer, lossFunction, **params)
Example #25
 def __init__(self, in_channels, out1, proj3, out3, proj5, out5, proj_pool):
     self.f = FunctionSet(
         conv1=Convolution2D(in_channels, out1, 1),
         proj3=Convolution2D(in_channels, proj3, 1),
         conv3=Convolution2D(proj3, out3, 3, pad=1),
         proj5=Convolution2D(in_channels, proj5, 1),
         conv5=Convolution2D(proj5, out5, 5, pad=2),
         projp=Convolution2D(in_channels, proj_pool, 1),
     )
Example #26
    def __init__(self, n_in, n_out, use_cuda=False):
        self.model = FunctionSet(transform=F.Linear(n_in, n_out))
        self.use_cuda = use_cuda

        if self.use_cuda:
            self.model.to_gpu()

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())
Example #27
 def __init__(self):

     self.model = FunctionSet(l1_x=F.Linear(go.SIZE, 4 * go.SIZE),
                              l1_h=F.Linear(4 * go.SIZE, 4 * go.SIZE),
                              last=F.Linear(4 * go.SIZE, go.SIZE))

     # keep a reference to the loss function itself; calling
     # F.softmax_cross_entropy() here with no arguments would raise a TypeError
     self.loss = F.softmax_cross_entropy
Example #28
 def __init__(self, num_inputs, num_units, dropout_ratio, corruption_level,
              optimizer, gpu):
     model = FunctionSet(
         encode=F.Linear(num_inputs, num_units),
         decode=F.Linear(num_units, num_inputs),
     )
     self.layers = [model.encode, model.decode]
     super(DenoisingAutoEncoder,
           self).__init__(model, optimizer, dropout_ratio, corruption_level,
                          gpu)
Example #29
    def __init__(self, n_in, n_hidden, use_cuda=False):
        self.model = FunctionSet(encode=F.Linear(n_in, n_hidden),
                                 decode=F.Linear(n_hidden, n_in))
        self.use_cuda = use_cuda

        if self.use_cuda:
            self.model.to_gpu()

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model.collect_parameters())
Example #30
 def __init__(self,n_act):
   self.N_input = 64
   N_output = n_act
   #N_unit = (self.N_input-1)*2
   N_unit = 64
   self.model = FunctionSet(
     l1=F.Linear(self.N_input,N_unit),
     #l2=F.Linear(N_unit, N_unit),
     #l3=F.Linear(N_unit, N_unit),
     l4=F.Linear(N_unit, N_output,initialW=np.zeros((N_output, N_unit), dtype=np.float32)))