def __init__(self):
        self.num_of_actions = 4

        print "Initializing DQN..."

        print "Model Building"
        self.model = Chain(l1=links.Convolution2D(self.STATE_FRAMES,
                                                  32,
                                                  ksize=8,
                                                  stride=4,
                                                  nobias=False,
                                                  wscale=0.01),
                           l2=links.Convolution2D(32,
                                                  64,
                                                  ksize=4,
                                                  stride=2,
                                                  nobias=False,
                                                  wscale=0.01),
                           l3=links.Convolution2D(64,
                                                  64,
                                                  ksize=3,
                                                  stride=1,
                                                  nobias=False,
                                                  wscale=0.01),
                           l4=links.Linear(3136, 512, wscale=0.01),
                           q_value=links.Linear(512,
                                                self.num_of_actions)).to_gpu()

        self.model_target = copy.deepcopy(self.model)
Example No. 2
    def fit(self,
            X,
            batchsize=100,
            n_iter=100,
            init_smooth=0.8,
            init_scale=0.1,
            lr=0.01,
            optimizer='Momentum'):
        L = np.array([len(seq) for seq in X])
        self.max_length = np.max(L)

        init = X[np.where(L == self.centroid_length)[0]]
        init = np.unique(init)
        init = init[np.random.choice(len(init), self.n_centroid,
                                     replace=False)]
        print(init)
        init_seq = one_hot_encoding(init, self.dict_alphabet, self.max_length,
                                    init_smooth)
        init_seq[np.where(init_seq != 0)] = np.log(
            init_seq[np.where(init_seq != 0)])
        noise = np.random.gumbel(0, 1, init_seq.shape)
        init_seq[np.where(init_seq != 0)] += noise[np.where(init_seq != 0)]
        init_seq *= init_scale
        init_seq = np.transpose(
            np.transpose(init_seq, (1, 0, 2)) - np.mean(init_seq, axis=1),
            (1, 0, 2))

        self.model = Chain(kmeans=SoftKMeansLayer(self.n_centroid,
                                                  self.centroid_length,
                                                  init_W=init_seq,
                                                  tau1=self.tau))

        self.optimizer = {
            'Adam': optimizers.Adam(lr),
            'Momentum': optimizers.MomentumSGD(lr),
            'SGD': optimizers.SGD(lr)
        }[optimizer]

        self.optimizer.setup(self.model)
        self.optimizer.add_hook(chainer.optimizer.WeightDecay(1e-6))
        if self.use_gpu:
            self.model.to_gpu()

        with chainer.using_config('train', True):
            lcurve = []
            for i in range(n_iter):
                self.model.cleargrads()
                indexes = np.random.choice(len(X), batchsize)
                x = X[indexes]
                x = one_hot_encoding(x, self.dict_alphabet, self.max_length)
                if self.use_gpu:
                    x = cupy.array(x)
                loss = self.model.kmeans(x)
                loss.backward()
                lcurve.append(float(loss.data))
                self.optimizer.update()
                print(i, np.mean(lcurve[-10:]))

        return np.array(lcurve)
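The initialization above takes the log of a smoothed one-hot encoding, perturbs it with Gumbel noise, and rescales it before centering. A standalone sketch of that step on one toy column (the alphabet size of 4 and the smoothing/scale values are illustrative, not from the source):

import numpy as np

p = np.array([0.8, 0.2 / 3, 0.2 / 3, 0.2 / 3])  # smoothed one-hot column (init_smooth=0.8)
logits = np.log(p)                               # log-probabilities
logits += np.random.gumbel(0, 1, p.shape)        # Gumbel-perturbed logits
logits *= 0.1                                    # init_scale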
Example No. 3
 def __init__(self, nlp, shape, **settings):
     Chain.__init__(self,
         embed=_Embed(shape['nr_vector'], shape['nr_dim'], shape['nr_hidden'],
             initialW=lambda arr: set_vectors(arr, nlp.vocab)),
         encode=_Encode(shape['nr_hidden'], shape['nr_hidden']),
         attend=_Attend(shape['nr_hidden'], shape['nr_hidden']),
         predict=_Predict(shape['nr_hidden'], shape['nr_class']))
     self.to_gpu(0)
Example No. 4
class ChainerDQNclass:
    STATE_FRAMES = 4  # number of frames to store in the state

    def __init__(self):
        self.num_of_actions = 4

        print "Initializing DQN..."

        print "Model Building"
        self.model = Chain(l1=links.Convolution2D(self.STATE_FRAMES,
                                                  32,
                                                  ksize=8,
                                                  stride=4,
                                                  nobias=False,
                                                  wscale=0.01),
                           l2=links.Convolution2D(32,
                                                  64,
                                                  ksize=4,
                                                  stride=2,
                                                  nobias=False,
                                                  wscale=0.01),
                           l3=links.Convolution2D(64,
                                                  64,
                                                  ksize=3,
                                                  stride=1,
                                                  nobias=False,
                                                  wscale=0.01),
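                           # 3136 = 64 * 7 * 7: assuming the standard 84x84 DQN
                           # input frames, the three conv layers above produce
                           # 64 feature maps of size 7x7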
                           l4=links.Linear(3136, 512, wscale=0.01),
                           q_value=links.Linear(512,
                                                self.num_of_actions)).to_gpu()

        self.model_target = copy.deepcopy(self.model)

        # self.optimizer = optimizers.Adam(alpha=1e-6)
        # self.optimizer.use_cleargrads()
        # self.optimizer.setup(self.model)

    def Q_func(self, state):
        h1 = functions.relu(self.model.l1(state))  # scale inputs in [0.0, 1.0]
        h2 = functions.relu(self.model.l2(h1))
        h3 = functions.relu(self.model.l3(h2))
        h4 = functions.relu(self.model.l4(h3))
        Q = self.model.q_value(h4)
        return Q

    def Q_func_target(self, state):
        h1 = functions.relu(
            self.model_target.l1(state))  # scale inputs in [0.0, 1.0]
        h2 = functions.relu(self.model_target.l2(h1))
        h3 = functions.relu(self.model_target.l3(h2))
        h4 = functions.relu(self.model_target.l4(h3))
        Q = self.model_target.q_value(h4)
        return Q

    def target_model_update(self):
        self.model_target = copy.deepcopy(self.model)
Example No. 5
    def __init__(self):
        self.model = Chain(conv1=L.Convolution2D(3, 20, 3, 1, 1),
                           conv2=L.Convolution2D(20, 20, 3, 1, 1),
                           conv3=L.Convolution2D(20, 40, 3, 1, 1),
                           conv4=L.Convolution2D(40, 40, 3, 1, 1),
                           linear1=L.Linear(None, 100),
                           linear2=L.Linear(100, 4))

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
Example No. 6
    def __init__(self, use_gpu, enable_controller, cnn_input_dim, feature_dim,
                 agent_count, other_input_dim, model):
        self.use_gpu = use_gpu
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller
        self.cnn_input_dim = cnn_input_dim
        self.feature_dim = feature_dim
        self.agent_count = agent_count
        self.other_input_dim = other_input_dim
        self.data_size = self.timestep_per_episode
        self.loss_log_file = self.loss_log + "loss.log"
        self.loss_per_episode = 0
        self.time_of_episode = 0

        print("Initializing Q-Network...")

        if model == 'None':
            self.model = Chain(
                conv1=L.Convolution2D(3 * self.hist_size, 32, 4, stride=2),
                bn1=L.BatchNormalization(32),
                conv2=L.Convolution2D(32, 32, 4, stride=2),
                bn2=L.BatchNormalization(32),
                conv3=L.Convolution2D(32, 32, 4, stride=2),
                bn3=L.BatchNormalization(32),
                #                 conv4=L.Convolution2D(64, 64, 4, stride=2),
                #                 bn4=L.BatchNormalization(64),
                l1=L.Linear(
                    self.feature_dim + self.other_input_dim * self.hist_size,
                    128),
                l2=L.Linear(128, 128),
                l3=L.Linear(128, 96),
                l4=L.Linear(96, 64),
                q_value=L.Linear(64, self.num_of_actions))
        else:
            with open(model, 'rb') as i:
                self.model = pickle.load(i)
                self.data_size = 0
        if self.use_gpu >= 0:
            self.model.to_gpu()

        self.optimizer = optimizers.RMSpropGraves()
        self.optimizer.setup(self.model)

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.d = [
            np.zeros((self.agent_count, self.data_size, self.hist_size, 128,
                      128, 3),
                     dtype=np.uint8),
            np.zeros((self.agent_count, self.data_size, self.hist_size,
                      self.other_input_dim),
                     dtype=np.uint8),
            np.zeros((self.agent_count, self.data_size), dtype=np.uint8),
            np.zeros((self.agent_count, self.data_size, 1), dtype=np.float32),
            np.zeros((self.agent_count, self.data_size, 1), dtype=np.bool)
        ]
Example No. 7
    def __init__(self):
        self.model = Chain(
            QL=L.EmbedID(int(maxL - minL + 1), 1),
            QVJ=L.EmbedID(len(VJ_dict), 1),
            QA=L.EmbedID(len(AA) + 1, 1, ignore_label=0))

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
        self.Z = 1
Example No. 8
	def __init__(self,name="perceptron",layers=(1000,1000),optimizer=None,activation=F.sigmoid):
		Network.__init__(self,name)
		self.layers = {}
		for i in range(len(layers)-1):
			layer = L.Linear(layers[i],layers[i+1])
			self.layers['l'+str(i)]=layer
		self.model = Chain(**self.layers)
		if Deel.gpu >=0:
			self.model = self.model.to_gpu(Deel.gpu)
		self.optimizer = optimizers.MomentumSGD(lr=0.01,momentum=0.9)
		self.optimizer.setup(self.model)
		self.activation = activation
Example No. 9
    def __init__(self, d, f, R):
        self.d = d
        self.f = f
        self.R = R
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])

        H = ChainList(*[
            ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
            for j in six.moves.range(5)
        ])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R)])
        self.model = Chain(H=H, W=W, g=g)
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
Example No. 10
def show_test_performance(model: chainer.Chain,
                          test: Iterable[Any],
                          *,
                          device: int = 0,
                          batchsize: int = 256) -> None:
    if device >= 0:
        model.to_gpu()
    test_iter = chainer.iterators.SerialIterator(test,
                                                 batchsize,
                                                 repeat=False,
                                                 shuffle=False)
    test_evaluator = extensions.Evaluator(test_iter, model, device=device)
    results = test_evaluator()
    print("Test accuracy:", results["main/accuracy"])
Example No. 11
def generate_mols(model: chainer.Chain,
                  temp=0.5,
                  z_mu=None,
                  batch_size=20,
                  true_adj=None,
                  device=-1):
    """

    :param model: GraphNVP model
    :param z_mu: latent vector of a molecule
    :param batch_size:
    :param true_adj:
    :param gpu:
    :return:
    """
    device_obj = chainer.backend.get_device(device)
    xp = device_obj.xp
    z_dim = model.adj_size + model.x_size
    mu = xp.zeros([z_dim], dtype=xp.float32)
    sigma_diag = xp.ones([z_dim])

    if model.hyperparams.learn_dist:
        sigma_diag = xp.sqrt(xp.exp(model.ln_var.data)) * sigma_diag
        # sigma_diag = xp.exp(xp.hstack((model.ln_var_x.data, model.ln_var_adj.data)))

    sigma = temp * sigma_diag

    with chainer.no_backprop_mode():
        if z_mu is not None:
            mu = z_mu
            sigma = 0.01 * xp.eye(z_dim, dtype=xp.float32)
        z = xp.random.normal(mu, sigma, (batch_size, z_dim)).astype(xp.float32)
        x, adj = model.reverse(z, true_adj=true_adj)
    return x, adj
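A plausible call, assuming a trained GraphNVP model (the name trained_model is illustrative):

# sample 20 molecules from the prior at temperature 0.5 on GPU 0
x, adj = generate_mols(trained_model, temp=0.5, batch_size=20, device=0)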
Example No. 12
 def __init__(self, d, f, R, gpu):
     self.d = d
     self.f = f
     self.R = R
     self.gpu = gpu
     g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])
     H = ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
     W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R + 1)])
     self.optimizer = optimizers.Adam()
     self.model = Chain(H=H, W=W, g=g)
     if gpu:
         self.model.to_gpu(0)
     self.optimizer.setup(self.model)
     self.to = [[] for i in six.moves.range(2)]
     self.atom_sid = [[] for i in six.moves.range(2)]
     self.anum = [[] for i in six.moves.range(2)]
Example No. 13
    def __init__(self,
                 rng,
                 data,
                 target,
                 n_inputs=784,
                 n_hidden=[784, 784, 784, 784, 784],
                 n_outputs=1,
                 corruption_levels=[0.1, 0.1, 0.1, 0.1, 0.1],
                 gpu=-1):

        self.model = Chain(l1=L.Linear(n_inputs, n_hidden[0]),
                           l2=L.Linear(n_hidden[0], n_hidden[1]),
                           l3=L.Linear(n_hidden[1], n_hidden[2]),
                           l4=L.Linear(n_hidden[2], n_hidden[3]),
                           l5=L.Linear(n_hidden[3], n_hidden[4]),
                           l6=L.Linear(n_hidden[4], n_outputs))

        if gpu >= 0:
            self.model.to_gpu()
            self.xp = cuda.cupy
        else:
            self.xp = np

        self.rng = rng
        self.gpu = gpu
        self.data = data
        self.target = target

        self.x_train, self.x_test = data
        self.y_train, self.y_test = target

        self.n_train = len(self.y_train)
        self.n_test = len(self.y_test)

        self.corruption_levels = corruption_levels
        self.n_inputs = n_inputs
        self.n_hidden = n_hidden
        self.n_outputs = n_outputs
        self.hidden_size = len(n_hidden)

        self.dae1 = None
        self.dae2 = None
        self.dae3 = None
        self.dae4 = None
        self.dae5 = None
        self.optimizer = None
        self.setup_optimizer()
Example No. 14
    def __init__(self, use_gpu, enable_controller, dim, epsilon, epsilon_delta,
                 min_eps):
        self.use_gpu = use_gpu
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller
        self.dim = dim
        self.epsilon = epsilon
        self.epsilon_delta = epsilon_delta
        self.min_eps = min_eps
        self.time = 0

        app_logger.info("Initializing Q-Network...")

        hidden_dim = 256
        self.model = Chain(
            l4=L.Linear(self.dim * self.hist_size,
                        hidden_dim,
                        initialW=initializers.Normal(
                            0.5 / math.sqrt(self.dim * self.hist_size))),
            q_value=L.Linear(hidden_dim,
                             self.num_of_actions,
                             initialW=np.zeros(
                                 (self.num_of_actions, hidden_dim),
                                 dtype=np.float32)))
        if self.use_gpu >= 0:
            self.model.to_gpu()

        self.model_target = copy.deepcopy(self.model)

        self.optimizer = optimizers.RMSpropGraves(lr=0.00025,
                                                  alpha=0.95,
                                                  momentum=0.95,
                                                  eps=0.0001)
        self.optimizer.setup(self.model)

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.d = [
            np.zeros((self.data_size, self.hist_size, self.dim),
                     dtype=np.uint8),
            np.zeros(self.data_size, dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.int8),
            np.zeros((self.data_size, self.hist_size, self.dim),
                     dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.bool)
        ]
Example No. 15
    def __init__(self, owner, kind_name, link, input=None):
        if input == owner:
            input = None

        if input is not None and not isinstance(input, Node):
            raise TypeError("Invalid argument. 'input' must be Node.")
        if not isinstance(link, Link):
            raise TypeError('Cannot register a non-link object as a child')

        Chain.__init__(self)
        Node.__init__(self, owner, kind_name)

        self.allow_multi_inputs = False
        self.output_same_value = True

        if input is not None:
            input.add_ouput(self)
            self.add_input(input)

        with self.init_scope():
            self.link = link
Example No. 16
    def __init__(self,
                 rng,
                 data,
                 n_inputs=784,
                 n_hidden=784,
                 corruption_level=0.05,
                 optimizer=optimizers.Adam,
                 gpu=-1):
        """
        Denoising AutoEncoder
        data: data for train
        n_inputs: a number of units of input layer and output layer
        n_hidden: a number of units of hidden layer
        corruption_level: a ratio of masking noise
        """

        self.model = Chain(encoder=L.Linear(n_inputs, n_hidden),
                           decoder=L.Linear(n_hidden, n_inputs))

        if gpu >= 0:
            self.model.to_gpu()
            self.xp = cuda.cupy
        else:
            self.xp = np

        self.gpu = gpu

        self.x_train, self.x_test = data

        self.n_train = len(self.x_train)
        self.n_test = len(self.x_test)

        self.n_inputs = n_inputs
        self.n_hidden = n_hidden

        self.optimizer = optimizer()
        self.optimizer.setup(self.model)
        self.corruption_level = corruption_level
        self.rng = rng
Example No. 17
File: fp.py Project: pfnet/nfp
    def __init__(self, d, f, R):
        self.d = d
        self.f = f
        self.R = R
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])

        H = ChainList(*[ChainList(*[L.Linear(f, f)
                                    for i in six.moves.range(R)])
                        for j in six.moves.range(5)])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R)])
        self.model = Chain(H=H, W=W, g=g)
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
Example No. 18
    def __init__(self, model, owner=None, kind_name='root', input=None):
        if input == owner:
            input = None

        if input is not None and not isinstance(input, Node):
            raise TypeError("Invalid argument. 'input' must be Node.")

        Chain.__init__(self)
        Node.__init__(self, owner, kind_name)

        self.allow_multi_inputs = False
        self.output_same_value = True

        if input is not None:
            input.add_ouput(self)
            self.add_input(input)

        self.model = model  # the Model this Module belongs to
        self.nodes = []  # list of child nodes
        self.kindwise_count = {}  # number of child nodes per kind
        self.firsts = None  # first nodes
        self.lasts = None  # last nodes
        self.assembly_depth = 0  # if nonzero, newly created nodes are registered as this module's children; if 0, as children of self.owner
Example No. 19
def get_predicted_scores(
    model: chainer.Chain,
    test_iterator: chainer.iterators.MultiprocessIterator,
    converter: Callable,
    device_id: int,
) -> np.ndarray:

    device = chainer.get_device(device_id)
    device.use()

    model.to_gpu()

    pred_scores = []

    with chainer.using_config("train", False):
        with chainer.using_config("enable_backprop", False):
            for batch in test_iterator:
                inputs = converter(batch, device)
                y = model.predict(*inputs)
                y.to_cpu()
                pred_scores.append(y.data.ravel())

    return np.hstack(pred_scores)
Example No. 20
def darknetConv2D(in_channel, out_channel, bn=True):
    if (bn):
        return Chain(
            c=L.Convolution2D(in_channel,
                              out_channel,
                              ksize=3,
                              pad=1,
                              nobias=True),
            n=L.BatchNormalization(out_channel, use_beta=False, eps=0.000001),
            b=L.Bias(shape=[out_channel]),
        )
    else:
        return Chain(
            c=L.Convolution2D(in_channel,
                              out_channel,
                              ksize=3,
                              pad=1,
                              nobias=True),
            b=L.Bias(shape=[out_channel]),
        )
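The returned Chain bundles a convolution, batch normalization, and a learnable bias; a plausible forward composition for it (this usage is an assumption, not part of the source):

import numpy as np
import chainer.functions as F

block = darknetConv2D(3, 16)
x = np.zeros((1, 3, 32, 32), dtype=np.float32)
h = F.leaky_relu(block.b(block.n(block.c(x))), slope=0.1)  # conv -> BN -> bias -> leaky ReLU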
Example No. 21
File: fp.py Project: pfnet/nfp
 def __init__(self, d, f, R, gpu):
     self.d = d
     self.f = f
     self.R = R
     self.gpu = gpu
     g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])
     H = ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
     W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R + 1)])
     self.optimizer = optimizers.Adam()
     self.model = Chain(H=H, W=W, g=g)
     if gpu:
         self.model.to_gpu(0)
     self.optimizer.setup(self.model)
     self.to = [[] for i in six.moves.range(2)]
     self.atom_sid = [[] for i in six.moves.range(2)]
     self.anum = [[] for i in six.moves.range(2)]
Example No. 22
class Perceptron(Chain, Network):
    def __init__(self,
                 name="perceptron",
                 layers=(1000, 1000),
                 optimizer=None,
                 activation=F.sigmoid):
        Network.__init__(self, name)
        self.layers = {}
        for i in range(len(layers) - 1):
            layer = L.Linear(layers[i], layers[i + 1])
            self.layers['l' + str(i)] = layer
        self.model = Chain(**self.layers)
        if Deel.gpu >= 0:
            self.model = self.model.to_gpu(Deel.gpu)
        self.optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
        self.optimizer.setup(self.model)
        self.activation = activation

    def forward(self, x=None, t=None):
        if x is None:
            x = Tensor.context
        xp = Deel.xp

        volatile = 'off' if Deel.train else 'on'
        h = Variable(xp.asarray(x.value, dtype=xp.float32), volatile=volatile)

        self.optimizer.zero_grads()
        for i in range(len(self.layers)):
            h = F.dropout(self.activation(self.layers['l' + str(i)](h)),
                          train=Deel.train)

        h = ChainerTensor(h)
        h.use()

        return h

    def backprop(self, t, x=None):
        if x is None:
            x = Tensor.context
        #loss = F.mean_squared_error(x.content,t.content)
        loss = F.softmax_cross_entropy(x.content, t.content)
        if Deel.train:
            loss.backward()
        accuracy = F.accuracy(x.content, t.content)
        self.optimizer.update()
        return loss.data, accuracy.data
Example No. 23
def train_detector(data, label, model_path='model',  model=None, optimizer=None, n_batch=20, n_epoch=20):
    num_data = len(label)
    if model is None:
        model = Chain(
            conv1=chainer.functions.Convolution2D(1, 32, 3),
            conv2=chainer.functions.Convolution2D(32, 64, 3),
            l1=chainer.functions.Linear(576, 200),
            l2=chainer.functions.Linear(200, 100),
            l3=chainer.functions.Linear(100, 10)
        )
    if optimizer is None:
        optimizer = optimizers.Adam()
    optimizer.setup(model)

    for epoch in range(n_epoch):
        print("epoch : %d" % (epoch + 1))

        # shuffle the training data randomly
        perm = numpy.random.permutation(num_data)
        sum_accuracy = 0
        sum_loss = 0

        # train on each mini-batch
        b_start = time.time()
        for i in range(0, num_data, n_batch):
            x_batch = data[perm[i:i + n_batch]]
            t_batch = label[perm[i:i + n_batch]]

            # reset gradients
            optimizer.zero_grads()
            # forward pass
            loss, accuracy = forward(x_batch, t_batch, model)
            # backpropagation
            loss.backward()
            optimizer.update()  # update the parameters

            sum_loss += float(loss.data) * n_batch
            sum_accuracy += float(accuracy.data) * n_batch

        # print loss and accuracy
        print("[train] loss: %f, accuracy: %f, time: %f sec"
              % (sum_loss / num_data, sum_accuracy / num_data, time.time() - b_start))

    # save the trained model
    pickle.dump(model, open(model_path, 'wb'), -1)
Example No. 24
class Perceptron(Chain,Network):
	def __init__(self,name="perceptron",layers=(1000,1000),optimizer=None,activation=F.sigmoid):
		Network.__init__(self,name)
		self.layers = {}
		for i in range(len(layers)-1):
			layer = L.Linear(layers[i],layers[i+1])
			self.layers['l'+str(i)]=layer
		self.model = Chain(**self.layers)
		if Deel.gpu >=0:
			self.model = self.model.to_gpu(Deel.gpu)
		self.optimizer = optimizers.MomentumSGD(lr=0.01,momentum=0.9)
		self.optimizer.setup(self.model)
		self.activation = activation
		

	def forward(self,x=None,t=None):
		if x is None:
			x=Tensor.context
		xp = Deel.xp

		volatile = 'off' if Deel.train else 'on'
		h = Variable(xp.asarray(x.value,dtype=xp.float32),volatile=volatile)

		self.optimizer.zero_grads()
		for i in range(len(self.layers)):
			h = F.dropout(self.activation(self.layers['l'+str(i)](h)),train=Deel.train)

		h = ChainerTensor(h)
		h.use()

		return h

	def backprop(self,t,x=None):
		if x is None:
			x=Tensor.context
		#loss = F.mean_squared_error(x.content,t.content)
		loss = F.softmax_cross_entropy(x.content,t.content)
		if  Deel.train:
			loss.backward()
		accuracy = F.accuracy(x.content,t.content)
		self.optimizer.update()
		return loss.data,accuracy.data
Example No. 25
class OldModel():
    def __init__(self):
        self.model = Chain(
            QL=L.EmbedID(int(maxL - minL + 1), 1),
            QVJ=L.EmbedID(len(VJ_dict), 1),
            QA=L.EmbedID(len(AA) + 1, 1, ignore_label=0))

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
        self.Z = 1


    def forward(self, data):
        out1 = self.model.QL(data[:, [0]])
        out2 = self.model.QVJ(data[:, [1]])
        out3 = self.model.QA(data[:, 2:])
        out = F.sum(F.concat((out1, out2, out3), axis=1)[:, :, 0], axis=1)
        return out

    def get_AA_coeficient(self):
        QA = self.model.QA.W.data
        back_code = {val: key for key, val in AA.items()}
        out = np.zeros((20, maxL - minL + 1, maxL))
        for i in range(1, len(QA)):
            if i in self.used_coef:
                out[AminoAcide[back_code[i][2]], back_code[i][0] - minL,
                    back_code[i][1]] = np.exp(QA[i])
        return out

    def predict(self, x):
        print('prediction')
        out = []
        step = 50
        for i in range(0, len(x), step):
            px = Variable(x[i:i + step])
            output = F.exp(self.forward(px)) / self.Z
            out += list(output.data)
        return np.array(out)



    def fit(self, DATA, GEN, counts, epochs):
        # Make a training function which takes in training data and targets
        # as an input.

        def train(data, gen, model, counts, batchsize=200000, n_epochs=1):
            lcurve = []
            data_size = data.shape[0]
            gen_size = gen.shape[0]
            gen_var = Variable(gen)

            # self.model.to_gpu()
            for epoch in range(n_epochs):
                # randomly shuffle the indices of the training data
                shuffler = np.random.permutation(data_size)

                # loop over batches
                for i in range(0, data_size, batchsize):
                    print('epoch %d %d%%' % (epoch + 1, 100 * i / data_size))
                    data_var = Variable(data[shuffler[i:i + batchsize]])
                    counts_var = Variable(counts[shuffler[i:i + batchsize]])
                    S = np.sum(counts[shuffler[i:i + batchsize]])
                    output_d = self.forward(data_var)
                    output_g = F.exp(self.forward(gen_var))

                    model.zerograds()

                    loss = -(F.sum(output_d * counts_var) -
                             S * F.log(F.sum(output_g) / gen_size))
                    lcurve.append(loss.data)
                    loss.backward()
                    self.optimizer.update()
            return np.array(lcurve)

        self.training = True
        self.lcurve = train(DATA, GEN, self.model, counts, n_epochs=epochs)

        gen_size = GEN.shape[0]
        gen_var = Variable(GEN)
        output_g = F.exp(self.forward(gen_var))
        self.Z = F.sum(output_g) / gen_size

        self.training = False
        self.used_coef = np.unique(DATA[:, 2:])
Example No. 26
 def __init__(self, nr_in, nr_out):
     Chain.__init__(self)
Example No. 27
File: fp.py Project: pfnet/nfp
class nfp(object):

    """NFP manager

    This class has the generator function of NFP and
    updator of NN for learning the generator of NFP.

    Args:
        d: Dimension of NFP.
        f: Dimension of the feature for generating NFP.
        R: Radius for generating NFP.
    """
    def __init__(self, d, f, R):
        self.d = d
        self.f = f
        self.R = R
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])

        H = ChainList(*[ChainList(*[L.Linear(f, f)
                                    for i in six.moves.range(R)])
                        for j in six.moves.range(5)])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R)])
        self.model = Chain(H=H, W=W, g=g)
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)

    def get_nfp(self, sid, train=True):
        """Generates NFP.

        Args:
            sid (int): Substance ID.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            fp: NFP.
        """

        d, f, R = self.d, self.f, self.R
        mol = data.load_sdf(sid)
        atoms = mol.GetAtoms()
        n = len(atoms)
        fp = Variable(np.zeros([1, d], dtype='float32'), volatile=not train)
        r = [[Variable(np.zeros([1, f], dtype='float32'), volatile=not train)
              for i in six.moves.range(n)] for j in six.moves.range(R + 1)]
        for atom in atoms:
            a = atom.GetIdx()
            anum = atom.GetAtomicNum()
            r[0][a] += self.model.g[anum](Variable(np.array([[1]],
                                                            dtype='float32'),
                                                   volatile=not train))
        for l in six.moves.range(R):
            v = [Variable(np.zeros([1, f], dtype='float32'),
                          volatile=not train)
                 for i in six.moves.range(n)]
            for atom in atoms:
                a = atom.GetIdx()
                v[a] += r[l][a]
                for n_atom in atom.GetNeighbors():
                    na = n_atom.GetIdx()
                    v[a] += r[l][na]
            for atom in atoms:
                a = atom.GetIdx()
                deg = atom.GetDegree()
                deg = min(5, max(1, deg))
                r[l + 1][a] = F.tanh(self.model.H[deg - 1][l](v[a]))
                i = F.softmax(self.model.W[l](r[l + 1][a]))
                fp += i
        return fp

    def update(self, sids, y, net, train=True):
        """Updates NFP NN.

        Args:
            sids (int[]): Substance ID.
            y (np.array(int32[])[2]): Activity data. y[0] is for
                    the training dataset and y[1] is for the test dataset.
            net (nn.NN): Classifier of QSAR.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            result (float): Overall accuracy on the test dataset.
        """

        def get_nfps(sids, train=True):
            print('generate fingerprints...')
            fps = {}
            for i, sid in enumerate(sids[0] + sids[1]):
                fps[sid] = self.get_nfp(sid, train)
            print('done.')
            return fps

        self.model.zerograds()

        fps = get_nfps(sids, train)
        x_train = [fps[sid] for sid in sids[0]]
        x_test = [fps[sid] for sid in sids[1]]
        for x in x_train:
            x.volatile = 'off'
        for x in x_test:
            x.volatile = 'off'

        result = net.train(x_train, y[0], x_test, y[1], train)
        self.optimizer.update()
        return result
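A minimal construction sketch for the class above (the d, f, R values and the substance ID are illustrative; AtomIdMax and the data module come from the surrounding project):

fp_gen = nfp(d=64, f=96, R=3)
fp = fp_gen.get_nfp(12345, train=False)  # a 1x64 Variable holding the fingerprint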
Example No. 28
class DA:
    def __init__(self,
                 rng,
                 data,
                 n_inputs=784,
                 n_hidden=784,
                 corruption_level=0.05,
                 optimizer=optimizers.Adam,
                 gpu=-1):
        """
        Denoising AutoEncoder
        data: data for train
        n_inputs: a number of units of input layer and output layer
        n_hidden: a number of units of hidden layer
        corruption_level: a ratio of masking noise
        """

        self.model = Chain(encoder=L.Linear(n_inputs, n_hidden),
                           decoder=L.Linear(n_hidden, n_inputs))

        if gpu >= 0:
            self.model.to_gpu()
            self.xp = cuda.cupy
        else:
            self.xp = np

        self.gpu = gpu

        self.x_train, self.x_test = data

        self.n_train = len(self.x_train)
        self.n_test = len(self.x_test)

        self.n_inputs = n_inputs
        self.n_hidden = n_hidden

        self.optimizer = optimizer()
        self.optimizer.setup(self.model)
        self.corruption_level = corruption_level
        self.rng = rng

    def forward(self, x_data, train=True):
        y_data = x_data
        # add noise (masking noise)
        x_data = self.get_corrupted_inputs(x_data, train=train)

        x, t = Variable(x_data.reshape(y_data.shape)), Variable(
            y_data.reshape(x_data.shape))

        # encode
        h = self.encode(x)
        # decode
        y = self.decode(h)
        # compute loss
        loss = F.mean_squared_error(y, t)
        return loss

    def compute_hidden(self, x_data):
        # x_data = self.xp.asarray(x_data)
        x = Variable(x_data)
        h = self.encode(x)
        # return cuda.to_cpu(h.data)
        return h.data

    def predict(self, x_data):
        x = Variable(x_data)
        # encode
        h = self.encode(x)
        # decode
        y = self.decode(h)
        return cuda.to_cpu(y.data)

    def encode(self, x):
        return F.relu(self.model.encoder(x))

    def decode(self, h):
        return F.relu(self.model.decoder(h))

    def encoder(self):
        initialW = self.model.encoder.W
        initial_bias = self.model.encoder.b

        return L.Linear(self.n_inputs,
                        self.n_hidden,
                        initialW=initialW,
                        initial_bias=initial_bias)

    def decoder(self):
        return self.model.decoder

    def to_cpu(self):
        self.model.to_cpu()
        self.xp = np

    def to_gpu(self):
        if self.gpu < 0:
            raise RuntimeError("to_gpu() called, but no GPU is configured")
        self.model.to_gpu()
        self.xp = cuda.cupy

    # masking noise
    def get_corrupted_inputs(self, x_data, train=True):
        if train and self.corruption_level != 0.0:
            mask = self.rng.binomial(size=x_data.shape,
                                     n=1,
                                     p=1.0 - self.corruption_level)
            mask = mask.astype(np.float32)
            mask = self.xp.asarray(mask)
            ret = mask * x_data
            # return self.xp.asarray(ret.astype(np.float32))
            return ret
        else:
            return x_data

    def train_and_test(self, n_epoch=5, batchsize=100):
        self.save_accuracy = self.xp.tile([1000.0], 2000)
        self.best_loss = 1.0

        for epoch in range(1, n_epoch + 1):
            print('epoch', epoch)

            perm = self.rng.permutation(self.n_train)
            sum_loss = 0
            for i in range(0, self.n_train, batchsize):
                x_batch = self.xp.asarray(self.x_train[perm[i:i + batchsize]])

                real_batchsize = len(x_batch)

                self.optimizer.zero_grads()
                loss = self.forward(x_batch)
                loss.backward()
                self.optimizer.update()

                sum_loss += float(loss.data) * real_batchsize

            print('train mean loss={}'.format(sum_loss / self.n_train))

            # evaluation
            sum_loss = 0
            for i in range(0, self.n_test, batchsize):
                x_batch = self.xp.asarray(self.x_test[i:i + batchsize])

                real_batchsize = len(x_batch)
                loss = self.forward(x_batch, train=False)

                sum_loss += float(loss.data) * real_batchsize

            print('test mean loss={}'.format(sum_loss / self.n_test))

            if (sum_loss / self.n_test) < self.best_loss:
                self.best_loss = sum_loss / self.n_test
                self.best_epoch = epoch
                serializers.save_hdf5('dae.model', self.model)
                print("update best loss")

            # early stopping?
            if self.xp.mean(self.save_accuracy) < sum_loss:
                print("early stopping done")
                break

            # append the latest test loss to the early-stopping history array
            self.save_accuracy = self.save_accuracy[1:]
            append = self.xp.array([float(sum_loss)])
            self.save_accuracy = self.xp.hstack((self.save_accuracy, append))

        print("best_epoch: %d" % (self.best_epoch))
        serializers.load_hdf5("dae.model", self.model)
Example No. 29
# convert the training data to images
def conv_feat_2_image(feats):
    data = np.ndarray((len(feats), 1, 28, 28), dtype=np.float32)
    for i, f in enumerate(feats):
        data[i] = f.reshape(28, 28)
    return data


data_train = conv_feat_2_image(data_train)
data_test = conv_feat_2_image(data_test)

# layer parameters
model = Chain(conv1=F.Convolution2D(1, 32, 3),
              conv2=F.Convolution2D(32, 64, 3),
              l1=F.Linear(576, 200),
              l2=F.Linear(200, 100),
              l3=F.Linear(100, 10)).to_gpu()


# forward propagation
def forward(x, is_train=True):
    h1 = F.max_pooling_2d(F.relu(model.conv1(x)), 3)
    h2 = F.max_pooling_2d(F.relu(model.conv2(h1)), 3)
    h3 = F.dropout(F.relu(model.l1(h2)), train=is_train)
    h4 = F.dropout(F.relu(model.l2(h3)), train=is_train)
    p = model.l3(h4)
    return p


# how to train
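The example is truncated at the comment above; a minimal training loop in the same style could look like this (label_train, the batch size of 100, and the Adam optimizer are assumptions, not part of the original snippet):

optimizer = optimizers.Adam()
optimizer.setup(model)
for epoch in range(10):
    perm = np.random.permutation(len(data_train))
    for i in range(0, len(data_train), 100):
        x = Variable(cuda.to_gpu(data_train[perm[i:i + 100]]))
        t = Variable(cuda.to_gpu(label_train[perm[i:i + 100]]))  # int32 labels
        model.cleargrads()
        loss = F.softmax_cross_entropy(forward(x), t)
        loss.backward()
        optimizer.update()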
Example No. 30
class QNet:
    # Hyper-Parameters
    gamma = 0.99  # Discount factor
    initial_exploration = 10**3  # Initial exploration. original: 5x10^4
    replay_size = 32  # Replay (batch) size
    target_model_update_freq = 10**4  # Target update frequency. original: 10^4
    data_size = 10**5  # Data size of history. original: 10^6
    hist_size = 1  # original: 4

    def __init__(self, use_gpu, enable_controller, dim, epsilon, epsilon_delta,
                 min_eps):
        self.use_gpu = use_gpu
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller
        self.dim = dim
        self.epsilon = epsilon
        self.epsilon_delta = epsilon_delta
        self.min_eps = min_eps
        self.time = 0

        app_logger.info("Initializing Q-Network...")

        hidden_dim = 256
        self.model = Chain(l4=L.Linear(self.dim * self.hist_size,
                                       hidden_dim,
                                       initialW=I.HeNormal()),
                           q_value=L.Linear(
                               hidden_dim,
                               self.num_of_actions,
                               initialW=np.zeros(
                                   (self.num_of_actions, hidden_dim),
                                   dtype=np.float32)))
        if self.use_gpu >= 0:
            self.model.to_gpu()

        self.model_target = copy.deepcopy(self.model)

        self.optimizer = optimizers.RMSpropGraves(lr=0.00025,
                                                  alpha=0.95,
                                                  momentum=0.95,
                                                  eps=0.0001)
        self.optimizer.setup(self.model)

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.d = [
            np.zeros((self.data_size, self.hist_size, self.dim),
                     dtype=np.uint8),
            np.zeros(self.data_size, dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.int8),
            np.zeros((self.data_size, self.hist_size, self.dim),
                     dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.bool)
        ]

    def forward(self, state, action, reward, state_dash, episode_end):
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)

        q = self.q_func(s)  # Get Q-value

        # Generate Target Signals
        tmp = self.q_func_target(s_dash)  # Q(s',*)
        if self.use_gpu >= 0:
            tmp = list(map(np.max, tmp.data.get()))  # max_a Q(s',a)
        else:
            tmp = list(map(np.max, tmp.data))  # max_a Q(s',a)

        max_q_dash = np.asanyarray(tmp, dtype=np.float32)
        if self.use_gpu >= 0:
            target = np.asanyarray(q.data.get(), dtype=np.float32)
        else:
            # make new array
            target = np.array(q.data, dtype=np.float32)

        for i in range(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = reward[i] + self.gamma * max_q_dash[i]
            else:
                tmp_ = reward[i]

            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_

        # TD-error clipping
        if self.use_gpu >= 0:
            target = cuda.to_gpu(target)
        td = Variable(target) - q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) >
                                                                 1)
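        # Outside [-1, 1] the TD error is replaced by its sign (td / |td|), so
        # the squared-error gradient below is clipped to magnitude 1, the error
        # clipping used in the DQN paper.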

        zero_val = np.zeros((self.replay_size, self.num_of_actions),
                            dtype=np.float32)
        if self.use_gpu >= 0:
            zero_val = cuda.to_gpu(zero_val)
        zero_val = Variable(zero_val)
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, q

    def q_func(self, state):
        h4 = F.relu(self.model.l4(state / 255.0))
        q = self.model.q_value(h4)
        return q

    def q_func_target(self, state):
        h4 = F.relu(self.model_target.l4(state / 255.0))
        q = self.model_target.q_value(h4)
        return q

    def e_greedy(self, state, epsilon):
        s = Variable(state)
        q = self.q_func(s)
        q = q.data

        if np.random.rand() < epsilon:
            index_action = np.random.randint(0, self.num_of_actions)
            app_logger.info(" Random")
        else:
            if self.use_gpu >= 0:
                index_action = np.argmax(q.get())
            else:
                index_action = np.argmax(q)
            app_logger.info("#Greedy")
        return self.index_to_action(index_action), q

    def target_model_update(self):
        self.model_target = copy.deepcopy(self.model)

    def index_to_action(self, index_of_action):
        return self.enable_controller[index_of_action]

    def action_to_index(self, action):
        return self.enable_controller.index(action)

    def start(self, feature):
        self.state = np.zeros((self.hist_size, self.dim), dtype=np.uint8)
        self.state[0] = feature

        state_ = np.asanyarray(self.state.reshape(1, self.hist_size, self.dim),
                               dtype=np.float32)
        if self.use_gpu >= 0:
            state_ = cuda.to_gpu(state_)

        # Generate an Action e-greedy
        action, q_now = self.e_greedy(state_, self.epsilon)
        return_action = action

        return return_action

    def update_model(self, replayed_experience):
        if replayed_experience[0]:
            self.optimizer.target.cleargrads()
            loss, _ = self.forward(replayed_experience[1],
                                   replayed_experience[2],
                                   replayed_experience[3],
                                   replayed_experience[4],
                                   replayed_experience[5])
            loss.backward()
            self.optimizer.update()

        # Target model update
        if replayed_experience[0] and np.mod(
                self.time, self.target_model_update_freq) == 0:
            app_logger.info("Model Updated")
            self.target_model_update()

        self.time += 1
        app_logger.info("step: {}".format(self.time))

    def step(self, features):
        if self.hist_size == 4:
            self.state = np.asanyarray(
                [self.state[1], self.state[2], self.state[3], features],
                dtype=np.uint8)
        elif self.hist_size == 2:
            self.state = np.asanyarray([self.state[1], features],
                                       dtype=np.uint8)
        elif self.hist_size == 1:
            self.state = np.asanyarray([features], dtype=np.uint8)
        else:
            app_logger.error("self.DQN.hist_size err")

        state_ = np.asanyarray(self.state.reshape(1, self.hist_size, self.dim),
                               dtype=np.float32)
        if self.use_gpu >= 0:
            state_ = cuda.to_gpu(state_)

        # Exploration decays along the time sequence
        if self.initial_exploration < self.time:
            self.epsilon -= self.epsilon_delta
            if self.epsilon < self.min_eps:
                self.epsilon = self.min_eps
            eps = self.epsilon
        else:  # Initial Exploration Phase
            app_logger.info("Initial Exploration : {}/{} steps".format(
                self.time, self.initial_exploration))
            eps = 1.0

        # Generate an Action by e-greedy action selection
        action, q_now = self.e_greedy(state_, eps)

        if self.use_gpu >= 0:
            q_max = np.max(q_now.get())
        else:
            q_max = np.max(q_now)

        return action, eps, q_max
Example No. 31
File: fp.py Project: pfnet/nfp
class nfp(object):

    """NFP manager

    This class has the generator function of NFP and
    updator of NN for learning the generator of NFP.

    Args:
        d: Dimension of NFP.
        f: Dimension of the feature for generating NFP.
        R: Radius for generating NFP.
        gpu (boolean): GPU flag. If you want to use GPU, set it True.
    """
    def __init__(self, d, f, R, gpu):
        self.d = d
        self.f = f
        self.R = R
        self.gpu = gpu
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])
        H = ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R + 1)])
        self.optimizer = optimizers.Adam()
        self.model = Chain(H=H, W=W, g=g)
        if gpu:
            self.model.to_gpu(0)
        self.optimizer.setup(self.model)
        self.to = [[] for i in six.moves.range(2)]
        self.atom_sid = [[] for i in six.moves.range(2)]
        self.anum = [[] for i in six.moves.range(2)]

    def get_nfp(self, sids, train=True):
        """Generates NFP.

        Args:
            sids (int[]): List of substance IDs.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            fp: Dictionary of NFPs. Key is a substance ID.
        """

        d, f, R = self.d, self.f, self.R

        def add_var(x):
            if self.gpu:
                return Variable(cuda.to_gpu(x, 0), volatile=not train)
            else:
                return Variable(x, volatile=not train)

        if train:
            ti = 0
        else:
            ti = 1
        to = self.to[ti]
        atom_sid = self.atom_sid[ti]
        anum = self.anum[ti]
        if len(to) == 0:
            for sid in sids:
                mol = data.load_sdf(sid)
                atoms = mol.GetAtoms()
                n = len(atoms)
                base = len(to)
                to += [[] for i in six.moves.range(n)]
                atom_sid += [sid for i in six.moves.range(n)]
                anum += [1 for i in six.moves.range(n)]
                for atom in atoms:
                    anum[base + atom.GetIdx()] = atom.GetAtomicNum()
                    to[base + atom.GetIdx()] = [base + n_atom.GetIdx()
                                                for n_atom in atom.GetNeighbors()]
            for i in six.moves.range(len(to)):
                if len(to[i]) == 0:
                    to[i].append(i)

        V = len(atom_sid)
        vec = [[] for i in six.moves.range(R + 1)]
        fp = {}
        for l in six.moves.range(R + 1):
            vec[l] = [add_var(np.zeros([1, f], dtype='float32'))
                      for i in six.moves.range(V)]
        for sid in sids:
            fp[sid] = add_var(np.zeros([1, d], dtype='float32'))
        for i in six.moves.range(V):
            vec[0][i] += self.model.g[anum[i]](add_var(np.array([[1]],
                                                                dtype='float32')))
        p = [[] for i in six.moves.range(R)]
        for l in six.moves.range(R):
            p[l] = [to[i][np.random.randint(len(to[i]))]
                    for i in six.moves.range(V)]
            for i in six.moves.range(V):
                vec[l + 1][i] = F.tanh(self.model.H[l]
                                       (vec[l][i] + vec[l][p[l][i]]))

        tmp = [[] for i in six.moves.range(R + 1)]
        for l in six.moves.range(R + 1):
            for i in six.moves.range(V):
                tmp[l].append(F.softmax(self.model.W[l](vec[l][i])))
        for l in six.moves.range(R + 1):
            for i in six.moves.range(V):
                fp[atom_sid[i]] += tmp[l][i]

        return fp

    def update(self, sids, y, net, train=True):
        """Updates NFP NN.

        Args:
            sids (int[]): Substance ID.
            y (np.array(int32[])[2]): Activity data. y[0] is for
                    the training dataset and y[1] is for the test dataset.
            net (nn.NN): Classifier of QSAR.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            result (float): Overall accuracy on the test dataset.
        """
        
        self.model.zerograds()

        fps = self.get_nfp(sids[0] + sids[1], train)

        x_train = [fps[sid] for sid in sids[0]]
        x_test = [fps[sid] for sid in sids[1]]
        for x in x_train:
            x.volatile = 'off'
        for x in x_test:
            x.volatile = 'off'

        result = net.train(x_train, y[0], x_test, y[1], train, self.gpu)

        self.optimizer.update()
        return result
Example No. 32
class FaceNet():
    def __init__(self):
        self.model = Chain(conv1=L.Convolution2D(3, 20, 3, 1, 1),
                           conv2=L.Convolution2D(20, 20, 3, 1, 1),
                           conv3=L.Convolution2D(20, 40, 3, 1, 1),
                           conv4=L.Convolution2D(40, 40, 3, 1, 1),
                           linear1=L.Linear(None, 100),
                           linear2=L.Linear(100, 4))

        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)

    def forward(self, x):
        out = self.model.conv1(x)
        out = F.elu(out)
        out = self.model.conv2(out)

        out = F.max_pooling_2d(out, 2)
        out = F.elu(out)
        out = self.model.conv3(out)
        out = F.elu(out)
        out = self.model.conv4(out)
        out = F.elu(out)

        out = F.average_pooling_2d(out, 6)
        out = F.dropout(out)
        out = self.model.linear1(out)
        out = F.elu(out)
        out = F.dropout(out)
        out = self.model.linear2(out)

        return out

    def predict(self, X, step=100):
        with chainer.using_config('train', False):
            with chainer.no_backprop_mode():
                output = []
                for i in range(0, len(X), step):
                    x = Variable(X[i:i + step])
                    output.append(self.forward(x).data)
                return np.vstack(output)

    def score(self, X, Y, step=100):
        predicted = self.predict(X, step)
        score = F.r2_score(predicted, Y).data
        return score

    def fit(self, X, Y, batchsize=100, n_epoch=10):
        with chainer.using_config('train', True):
            learning_curve = []
            for epoch in range(n_epoch):
                print('epoch ', epoch)
                index = np.random.permutation(len(X))
                for i in range(0, len(index), batchsize):
                    self.model.cleargrads()
                    print(i)
                    x = X[index[i:i + batchsize]]
                    y = Y[index[i:i + batchsize]]
                    #augment(x, y)

                    x = Variable(x)
                    y = Variable(y)

                    output = self.forward(x)
                    loss = F.mean_squared_error(y, output)
                    loss.backward()

                    learning_curve.append(float(loss.data))

                    self.optimizer.update()
            return learning_curve
Example No. 33
class nfp(object):
    """NFP manager

    This class has the generator function of NFP and
    updator of NN for learning the generator of NFP.

    Args:
        d: Dimension of NFP.
        f: Dimension of the feature for generating NFP.
        R: Radius for generating NFP.
        gpu (boolean): GPU flag. If you want to use GPU, set it True.
    """
    def __init__(self, d, f, R, gpu):
        self.d = d
        self.f = f
        self.R = R
        self.gpu = gpu
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])
        H = ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R + 1)])
        self.optimizer = optimizers.Adam()
        self.model = Chain(H=H, W=W, g=g)
        if gpu:
            self.model.to_gpu(0)
        self.optimizer.setup(self.model)
        self.to = [[] for i in six.moves.range(2)]
        self.atom_sid = [[] for i in six.moves.range(2)]
        self.anum = [[] for i in six.moves.range(2)]

    def get_nfp(self, sids, train=True):
        """Generates NFP.

        Args:
            sids (int[]): List of substance IDs.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            fp: Dictionary of NFPs. Key is a substance ID.
        """

        d, f, R = self.d, self.f, self.R

        def add_var(x):
            if self.gpu:
                return Variable(cuda.to_gpu(x, 0), volatile=not train)
            else:
                return Variable(x, volatile=not train)

        if train:
            ti = 0
        else:
            ti = 1
        to = self.to[ti]
        atom_sid = self.atom_sid[ti]
        anum = self.anum[ti]
        if len(to) == 0:
            for sid in sids:
                mol = data.load_sdf(sid)
                atoms = mol.GetAtoms()
                n = len(atoms)
                base = len(to)
                to += [[] for i in six.moves.range(n)]
                atom_sid += [sid for i in six.moves.range(n)]
                anum += [1 for i in six.moves.range(n)]
                for atom in atoms:
                    anum[base + atom.GetIdx()] = atom.GetAtomicNum()
                    to[base + atom.GetIdx()] = [
                        base + n_atom.GetIdx()
                        for n_atom in atom.GetNeighbors()
                    ]
            for i in six.moves.range(len(to)):
                if len(to[i]) == 0:
                    to[i].append(i)

        V = len(atom_sid)
        vec = [[] for i in six.moves.range(R + 1)]
        fp = {}
        for l in six.moves.range(R + 1):
            vec[l] = [
                add_var(np.zeros([1, f], dtype='float32'))
                for i in six.moves.range(V)
            ]
        for sid in sids:
            fp[sid] = add_var(np.zeros([1, d], dtype='float32'))
        for i in six.moves.range(V):
            vec[0][i] += self.model.g[anum[i]](add_var(
                np.array([[1]], dtype='float32')))
        p = [[] for i in six.moves.range(R)]
        for l in six.moves.range(R):
            p[l] = [
                to[i][np.random.randint(len(to[i]))]
                for i in six.moves.range(V)
            ]
            for i in six.moves.range(V):
                vec[l + 1][i] = F.tanh(self.model.H[l](vec[l][i] +
                                                       vec[l][p[l][i]]))

        tmp = [[] for i in six.moves.range(R + 1)]
        for l in six.moves.range(R + 1):
            for i in six.moves.range(V):
                tmp[l].append(F.softmax(self.model.W[l](vec[l][i])))
        for l in six.moves.range(R + 1):
            for i in six.moves.range(V):
                fp[atom_sid[i]] += tmp[l][i]

        return fp

    def update(self, sids, y, net, train=True):
        """Updates NFP NN.

        Args:
            sids (int[]): Substance ID.
            y (np.array(int32[])[2]): Activity data. y[0] is for
                    the training dataset and y[1] is for the test dataset.
            net (nn.NN): Classifier of QSAR.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            result (float): Overall accuracy on the test dataset.
        """

        self.model.zerograds()

        fps = self.get_nfp(sids[0] + sids[1], train)

        x_train = [fps[sid] for sid in sids[0]]
        x_test = [fps[sid] for sid in sids[1]]
        for x in x_train:
            x.volatile = 'off'
        for x in x_test:
            x.volatile = 'off'

        result = net.train(x_train, y[0], x_test, y[1], train, self.gpu)

        self.optimizer.update()
        return result
Example No. 34
 def __init__(self, nr_vector, nr_dim, nr_out, set_vectors=None):
     Chain.__init__(self,
         embed=L.EmbedID(nr_vector, nr_dim, initialW=set_vectors),
         project=L.Linear(None, nr_out, nobias=True))
     self.embed.W.volatile = False
Example No. 35
 def __init__(self, nr_vector, nr_dim, nr_out):
     Chain.__init__(self,
         embed=L.EmbedID(nr_vector, nr_dim),
         project=L.Linear(None, nr_out, nobias=True))
Example No. 36
# -*- coding:utf-8 -*-

import numpy as np
import chainer
from chainer import Function, Variable, optimizers
from chainer import Link, Chain
import chainer.functions as F
import chainer.links as L

# main
if __name__ == '__main__':
    x_data = np.array([[1.0, 0.5]], dtype=np.float32)
    t_data = np.array([0])

    model = Chain(
        layer1=L.Linear(2, 2, initialW=np.array([[0.1, 0.2], [0.3, 0.4]], dtype=np.float32)),
        layer2=L.Linear(2, 2, initialW=np.array([[0.1, 0.2], [0.3, 0.4]], dtype=np.float32)))

    optimizer = optimizers.SGD()
    optimizer.setup(model)

    x = Variable(x_data)
    t = Variable(t_data)
    u1 = model.layer1(x)
    z1 = F.sigmoid(u1)
    u2 = model.layer2(z1)
    y = F.softmax(u2)

    print "x=\n" + str(x.data)
    print "w1=\n" + str(model.layer1.W.data)
    print "b1=\n" + str(model.layer1.b.data)
    print "u1=\n" + str(u1.data)
Example No. 37
 def __init__(self):
     # super(MyChain, self).__init__()  # convenient with multiple inheritance: it calls every parent constructor at once
     Chain.__init__(self)  # to call a parent-class method from a subclass, prefix it with the base class name and pass in self
     with self.init_scope():
         self.l1 = L.Linear(4, 3)
         self.l2 = L.Linear(3, 2)
Example No. 38
 def __init__(self, nr_in, nr_out):
     Chain.__init__(self,
         fwd=L.LSTM(nr_in, nr_out),
         bwd=L.LSTM(nr_in, nr_out),
         mix=L.Bilinear(nr_out, nr_out, nr_out))
Example No. 39
    def __call__(self, x):
        return MyBiasFunction()(x, self.b)

# DCNN constants
minibatch_size = 16
feature_num = 4
k = 192

model = Chain(
    layer1=L.Convolution2D(in_channels=feature_num, out_channels=k, ksize=5, pad=2),
    layer2=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer3=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer4=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer5=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer6=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer7=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer8=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer9=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer10=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer11=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer12=L.Convolution2D(in_channels=k, out_channels=k, ksize=3, pad=1),
    layer13=L.Convolution2D(in_channels=k, out_channels=1, ksize=1, nobias=True),
    layer13_2=MyBias(19 * 19))

model.to_gpu()

if args.learning_rate != '':
    optimizer = optimizers.SGD(float(args.learning_rate))
else:
    optimizer = optimizers.SGD()
Example No. 40
 def __init__(self, nr_in, nr_out):
     Chain.__init__(self,
         l1=L.Linear(nr_in, nr_in),
         l2=L.Linear(nr_in, nr_out))