Code example #1
0
File: show.py  Project: keisuke-umezawa/chutil
def show_test_performance(model: chainer.Chain,
                          test: Iterable[Any],
                          *,
                          device: int = 0,
                          batchsize: int = 256) -> None:
    """Evaluate *model* on *test* and print its accuracy.

    Args:
        model: Trained Chainer model; the Evaluator reports ``main/accuracy``.
        test: Test dataset accepted by ``SerialIterator``.
        device: GPU device id; a negative value keeps the model on CPU.
        batchsize: Mini-batch size used during evaluation.
    """
    if device >= 0:
        # Bug fix: move the model to the *requested* GPU; the original
        # called ``to_gpu()`` with no argument, ignoring ``device``.
        model.to_gpu(device)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 batchsize,
                                                 repeat=False,
                                                 shuffle=False)
    test_evaluator = extensions.Evaluator(test_iter, model, device=device)
    results = test_evaluator()
    print("Test accuracy:", results["main/accuracy"])
Code example #2
0
def get_predicted_scores(
    model: chainer.Chain,
    test_iterator: chainer.iterators.MultiprocessIterator,
    converter: Callable,
    device_id: int,
) -> np.ndarray:
    """Run *model* over every batch of *test_iterator* and return the
    flattened prediction scores as one 1-D array.

    Args:
        model: Model exposing a ``predict(*inputs)`` method.
        test_iterator: Iterator yielding evaluation batches (must be
            finite, i.e. constructed with ``repeat=False``).
        converter: Batch converter, e.g. ``chainer.dataset.concat_examples``.
        device_id: Chainer device id; negative values select the CPU.

    Returns:
        1-D ``np.ndarray`` of concatenated, raveled scores.
    """
    device = chainer.get_device(device_id)
    device.use()

    # Bug fix: the original unconditionally called ``model.to_gpu()``,
    # which crashes on CPU-only runs (device_id < 0).  ``to_device``
    # honours the resolved device in both cases.
    model.to_device(device)

    pred_scores = []

    # Disable train-mode behaviour (dropout, BN updates) and graph
    # construction during inference.
    with chainer.using_config("train", False):
        with chainer.using_config("enable_backprop", False):
            for batch in test_iterator:
                inputs = converter(batch, device)
                y = model.predict(*inputs)
                y.to_cpu()
                pred_scores.append(y.data.ravel())

    return np.hstack(pred_scores)
Code example #3
0
File: __init__.py  Project: takus69/deel
class Perceptron(Chain, Network):
    """Multi-layer perceptron built from consecutive Linear links.

    Args:
        name: Network name.
        layers: Unit counts per layer; consecutive pairs become Linear links.
        optimizer: Optional pre-built optimizer; defaults to MomentumSGD.
        activation: Activation applied after every Linear layer.
    """

    def __init__(self,
                 name="perceptron",
                 layers=(1000, 1000),
                 optimizer=None,
                 activation=F.sigmoid):
        Network.__init__(self, name)
        self.layers = {}
        for i in range(len(layers) - 1):
            layer = L.Linear(layers[i], layers[i + 1])
            self.layers['l' + str(i)] = layer
        self.model = Chain(**self.layers)
        if Deel.gpu >= 0:
            self.model = self.model.to_gpu(Deel.gpu)
        # Bug fix: the ``optimizer`` argument used to be silently ignored;
        # honour it when provided, otherwise keep the original default.
        self.optimizer = (optimizer if optimizer is not None
                          else optimizers.MomentumSGD(lr=0.01, momentum=0.9))
        self.optimizer.setup(self.model)
        self.activation = activation

    def forward(self, x=None, t=None):
        """Forward pass; reads ``Tensor.context`` when *x* is omitted."""
        if x is None:
            x = Tensor.context
        xp = Deel.xp

        volatile = 'off' if Deel.train else 'on'
        # Bug fix: use ``xp.asarray`` so the input lands on the GPU when
        # Deel.xp is cupy; the original always built a NumPy array (the
        # sibling copy of this class already does it this way).
        h = Variable(xp.asarray(x.value, dtype=xp.float32), volatile=volatile)

        self.optimizer.zero_grads()
        for i in range(len(self.layers)):
            h = F.dropout(self.activation(self.layers['l' + str(i)](h)),
                          train=Deel.train)

        h = ChainerTensor(h)
        h.use()

        return h

    def backprop(self, t, x=None):
        """Compute loss/accuracy against *t* and take one optimizer step."""
        if x is None:
            x = Tensor.context
        loss = F.softmax_cross_entropy(x.content, t.content)
        if Deel.train:
            loss.backward()
        accuracy = F.accuracy(x.content, t.content)
        self.optimizer.update()
        return loss.data, accuracy.data
Code example #4
0
File: __init__.py  Project: LungTakumi/deel
class Perceptron(Chain,Network):
    """Multi-layer perceptron assembled from consecutive Linear links.

    ``layers`` lists the unit count of each layer; link ``l<i>`` maps
    ``layers[i]`` units to ``layers[i + 1]`` units.
    """

    def __init__(self, name="perceptron", layers=(1000, 1000),
                 optimizer=None, activation=F.sigmoid):
        Network.__init__(self, name)
        # One Linear link per consecutive pair of layer sizes.
        self.layers = {
            'l%d' % idx: L.Linear(n_in, n_out)
            for idx, (n_in, n_out) in enumerate(zip(layers, layers[1:]))
        }
        self.model = Chain(**self.layers)
        if Deel.gpu >= 0:
            self.model = self.model.to_gpu(Deel.gpu)
        self.optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
        self.optimizer.setup(self.model)
        self.activation = activation

    def forward(self, x=None, t=None):
        """Push the input (or ``Tensor.context``) through every layer."""
        source = Tensor.context if x is None else x
        xp = Deel.xp

        mode = 'off' if Deel.train else 'on'
        out = Variable(xp.asarray(source.value, dtype=xp.float32),
                       volatile=mode)

        self.optimizer.zero_grads()
        for idx in range(len(self.layers)):
            link = self.layers['l%d' % idx]
            out = F.dropout(self.activation(link(out)), train=Deel.train)

        wrapped = ChainerTensor(out)
        wrapped.use()

        return wrapped

    def backprop(self, t, x=None):
        """Compute softmax cross-entropy loss/accuracy and step the optimizer."""
        inp = Tensor.context if x is None else x
        loss = F.softmax_cross_entropy(inp.content, t.content)
        if Deel.train:
            loss.backward()
        accuracy = F.accuracy(inp.content, t.content)
        self.optimizer.update()
        return loss.data, accuracy.data
Code example #5
0
File: sl_policy.py  Project: vipmath/GoSample2
    layer1=L.Convolution2D(in_channels = feature_num, out_channels = k, ksize = 5, pad = 2),
    layer2=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer3=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer4=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer5=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer6=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer7=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer8=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer9=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer10=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer11=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer12=L.Convolution2D(in_channels = k, out_channels = k, ksize = 3, pad = 1),
    layer13=L.Convolution2D(in_channels = k, out_channels = 1, ksize = 1, nobias = True),
    layer13_2=MyBias(19*19))

# NOTE(review): ``model``, ``args``, ``WeightDecay`` and ``serializers`` are
# defined earlier in this script (outside this excerpt).
model.to_gpu()

# Use the learning rate from the command line when given, otherwise fall
# back to the optimizer's built-in default.
if args.learning_rate != '':
    optimizer = optimizers.SGD(float(args.learning_rate))
else:
    optimizer = optimizers.SGD()

optimizer.setup(model)
# Optional L2 regularization, again driven by the command line.
if args.weight_decay != '':
    optimizer.add_hook(WeightDecay(float(args.weight_decay)))

# Init/Resume: optionally warm-start from a previously saved model.
if args.initmodel:
    print('Load model from', args.initmodel)
    serializers.load_npz(args.initmodel, model)
if args.resume:
Code example #6
0
class QNet:
    """Deep Q-Network with one hidden layer and a periodically frozen
    target network (DQN-style TD learning with error clipping)."""

    # Hyper-Parameters
    gamma = 0.99  # Discount factor
    initial_exploration = 10**3  # Initial exploration steps. original: 5x10^4
    replay_size = 32  # Replay (batch) size
    target_model_update_freq = 10**4  # Target update frequency. original: 10^4
    data_size = 10**5  # Data size of history. original: 10^6
    hist_size = 1  # original: 4

    def __init__(self, use_gpu, enable_controller, dim, epsilon, epsilon_delta,
                 min_eps):
        """
        Args:
            use_gpu: GPU id; negative values run on CPU.
            enable_controller: List of raw action codes; Q-value column
                indices map into this list.
            dim: Dimensionality of one observation feature vector.
            epsilon: Initial exploration rate.
            epsilon_delta: Per-step epsilon decay after the initial phase.
            min_eps: Lower bound for epsilon.
        """
        self.use_gpu = use_gpu
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller
        self.dim = dim
        self.epsilon = epsilon
        self.epsilon_delta = epsilon_delta
        self.min_eps = min_eps
        self.time = 0

        app_logger.info("Initializing Q-Network...")

        hidden_dim = 256
        # q_value starts at all-zero weights so initial Q estimates are 0.
        self.model = Chain(l4=L.Linear(self.dim * self.hist_size,
                                       hidden_dim,
                                       initialW=I.HeNormal()),
                           q_value=L.Linear(
                               hidden_dim,
                               self.num_of_actions,
                               initialW=np.zeros(
                                   (self.num_of_actions, hidden_dim),
                                   dtype=np.float32)))
        if self.use_gpu >= 0:
            self.model.to_gpu()

        self.model_target = copy.deepcopy(self.model)

        self.optimizer = optimizers.RMSpropGraves(lr=0.00025,
                                                  alpha=0.95,
                                                  momentum=0.95,
                                                  eps=0.0001)
        self.optimizer.setup(self.model)

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        # Bug fix: ``np.bool`` was removed in NumPy 1.24 — use builtin bool.
        self.d = [
            np.zeros((self.data_size, self.hist_size, self.dim),
                     dtype=np.uint8),
            np.zeros(self.data_size, dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=np.int8),
            np.zeros((self.data_size, self.hist_size, self.dim),
                     dtype=np.uint8),
            np.zeros((self.data_size, 1), dtype=bool)
        ]

    def forward(self, state, action, reward, state_dash, episode_end):
        """Compute the clipped TD loss for one replay batch.

        Returns:
            (loss, q): scalar loss Variable and the batch Q-values.
        """
        num_of_batch = state.shape[0]
        s = Variable(state)
        s_dash = Variable(state_dash)

        q = self.q_func(s)  # Get Q-value

        # Generate Target Signals
        tmp = self.q_func_target(s_dash)  # Q(s',*)
        if self.use_gpu >= 0:
            tmp = list(map(np.max, tmp.data.get()))  # max_a Q(s',a)
        else:
            tmp = list(map(np.max, tmp.data))  # max_a Q(s',a)

        max_q_dash = np.asanyarray(tmp, dtype=np.float32)
        if self.use_gpu >= 0:
            target = np.asanyarray(q.data.get(), dtype=np.float32)
        else:
            # make new array
            target = np.array(q.data, dtype=np.float32)

        # Bellman target: r + gamma * max_a Q(s',a), or just r at episode end.
        for i in range(num_of_batch):
            if not episode_end[i][0]:
                tmp_ = reward[i] + self.gamma * max_q_dash[i]
            else:
                tmp_ = reward[i]

            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_

        # TD-error clipping
        if self.use_gpu >= 0:
            target = cuda.to_gpu(target)
        td = Variable(target) - q  # TD error
        td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
        td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) >
                                                                 1)

        zero_val = np.zeros((self.replay_size, self.num_of_actions),
                            dtype=np.float32)
        if self.use_gpu >= 0:
            zero_val = cuda.to_gpu(zero_val)
        zero_val = Variable(zero_val)
        loss = F.mean_squared_error(td_clip, zero_val)
        return loss, q

    def q_func(self, state):
        """Q-values from the online network (inputs scaled from [0, 255])."""
        h4 = F.relu(self.model.l4(state / 255.0))
        q = self.model.q_value(h4)
        return q

    def q_func_target(self, state):
        """Q-values from the frozen target network."""
        h4 = F.relu(self.model_target.l4(state / 255.0))
        q = self.model_target.q_value(h4)
        return q

    def e_greedy(self, state, epsilon):
        """Pick an action: random with prob. *epsilon*, else greedy on Q."""
        s = Variable(state)
        q = self.q_func(s)
        q = q.data

        if np.random.rand() < epsilon:
            index_action = np.random.randint(0, self.num_of_actions)
            app_logger.info(" Random")
        else:
            if self.use_gpu >= 0:
                index_action = np.argmax(q.get())
            else:
                index_action = np.argmax(q)
            app_logger.info("#Greedy")
        return self.index_to_action(index_action), q

    def target_model_update(self):
        """Freeze a copy of the online network as the new target network."""
        self.model_target = copy.deepcopy(self.model)

    def index_to_action(self, index_of_action):
        """Map a Q-value column index to its raw action code."""
        return self.enable_controller[index_of_action]

    def action_to_index(self, action):
        """Map a raw action code back to its Q-value column index."""
        return self.enable_controller.index(action)

    def start(self, feature):
        """Reset the observation history with *feature* and pick an action."""
        self.state = np.zeros((self.hist_size, self.dim), dtype=np.uint8)
        self.state[0] = feature

        state_ = np.asanyarray(self.state.reshape(1, self.hist_size, self.dim),
                               dtype=np.float32)
        if self.use_gpu >= 0:
            state_ = cuda.to_gpu(state_)

        # Generate an Action e-greedy
        action, q_now = self.e_greedy(state_, self.epsilon)
        return_action = action

        return return_action

    def update_model(self, replayed_experience):
        """Take one gradient step on a replayed batch and maybe sync target.

        ``replayed_experience`` is ``[valid_flag, s, a, r, s', end_flag]``.
        """
        if replayed_experience[0]:
            self.optimizer.target.cleargrads()
            loss, _ = self.forward(replayed_experience[1],
                                   replayed_experience[2],
                                   replayed_experience[3],
                                   replayed_experience[4],
                                   replayed_experience[5])
            loss.backward()
            self.optimizer.update()

        # Target model update
        if replayed_experience[0] and np.mod(
                self.time, self.target_model_update_freq) == 0:
            app_logger.info("Model Updated")
            self.target_model_update()

        self.time += 1
        app_logger.info("step: {}".format(self.time))

    def step(self, features):
        """Shift *features* into the history, decay epsilon, pick an action.

        Returns:
            (action, eps, q_max) for logging/acting.
        """
        if self.hist_size == 4:
            self.state = np.asanyarray(
                [self.state[1], self.state[2], self.state[3], features],
                dtype=np.uint8)
        elif self.hist_size == 2:
            self.state = np.asanyarray([self.state[1], features],
                                       dtype=np.uint8)
        elif self.hist_size == 1:
            self.state = np.asanyarray([features], dtype=np.uint8)
        else:
            app_logger.error("self.DQN.hist_size err")

        state_ = np.asanyarray(self.state.reshape(1, self.hist_size, self.dim),
                               dtype=np.float32)
        if self.use_gpu >= 0:
            state_ = cuda.to_gpu(state_)

        # Exploration decays along the time sequence
        if self.initial_exploration < self.time:
            self.epsilon -= self.epsilon_delta
            if self.epsilon < self.min_eps:
                self.epsilon = self.min_eps
            eps = self.epsilon
        else:  # Initial Exploration Phase
            app_logger.info("Initial Exploration : {}/{} steps".format(
                self.time, self.initial_exploration))
            eps = 1.0

        # Generate an Action by e-greedy action selection
        action, q_now = self.e_greedy(state_, eps)

        if self.use_gpu >= 0:
            q_max = np.max(q_now.get())
        else:
            q_max = np.max(q_now)

        return action, eps, q_max
Code example #7
0
class nfp(object):
    """NFP manager

    This class has the generator function of NFP and
    updator of NN for learning the generator of NFP.

    Args:
        d: Dimension of NFP.
        f: Dimension of the feature for generating NFP.
        R: Radius for generating NFP.
        gpu (boolean): GPU flag. If you want to use GPU, set it True.
    """
    def __init__(self, d, f, R, gpu):
        self.d = d
        self.f = f
        self.R = R
        self.gpu = gpu
        # g: one embedding layer per atomic number (indexed up to AtomIdMax).
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])
        # H: one hidden transform per radius level; W: one output projection
        # per level (R + 1 levels, including the initial one).
        H = ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R + 1)])
        self.optimizer = optimizers.Adam()
        self.model = Chain(H=H, W=W, g=g)
        if gpu:
            self.model.to_gpu(0)
        self.optimizer.setup(self.model)
        # Lazily-built molecule caches, one slot per split (0: train, 1: test):
        # to -> adjacency lists, atom_sid -> owning substance id per atom,
        # anum -> atomic number per atom.
        self.to = [[] for i in six.moves.range(2)]
        self.atom_sid = [[] for i in six.moves.range(2)]
        self.anum = [[] for i in six.moves.range(2)]

    def get_nfp(self, sids, train=True):
        """Generates NFP.

        Args:
            sids (int[]): List of substance IDs.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            fp: Dictionary of NFPs. Key is a substance ID.
        """

        d, f, R = self.d, self.f, self.R

        def add_var(x):
            # Wrap x in a Variable on the right device; volatile disables
            # graph construction when not training.
            if self.gpu:
                return Variable(cuda.to_gpu(x, 0), volatile=not train)
            else:
                return Variable(x, volatile=not train)

        # Select the per-split caches (0: train, 1: test).
        if train:
            ti = 0
        else:
            ti = 1
        to = self.to[ti]
        atom_sid = self.atom_sid[ti]
        anum = self.anum[ti]
        # First call for this split: load each molecule once and cache its
        # atoms' adjacency, owning substance id and atomic number.
        # NOTE(review): the cache is keyed only by split, so this assumes the
        # same ``sids`` are passed on every later call for that split — verify.
        if len(to) == 0:
            for sid in sids:
                mol = data.load_sdf(sid)
                atoms = mol.GetAtoms()
                n = len(atoms)
                base = len(to)
                to += [[] for i in six.moves.range(n)]
                atom_sid += [sid for i in six.moves.range(n)]
                anum += [1 for i in six.moves.range(n)]
                for atom in atoms:
                    anum[base + atom.GetIdx()] = atom.GetAtomicNum()
                    to[base + atom.GetIdx()] = [
                        base + n_atom.GetIdx()
                        for n_atom in atom.GetNeighbors()
                    ]
            # Give isolated atoms a self-loop so neighbor sampling never fails.
            for i in six.moves.range(len(to)):
                if len(to[i]) == 0:
                    to[i].append(i)

        V = len(atom_sid)
        # vec[l][i]: feature vector of atom i at radius level l.
        vec = [[] for i in six.moves.range(R + 1)]
        fp = {}
        for l in six.moves.range(R + 1):
            vec[l] = [
                add_var(np.zeros([1, f], dtype='float32'))
                for i in six.moves.range(V)
            ]
        for sid in sids:
            fp[sid] = add_var(np.zeros([1, d], dtype='float32'))
        # Level 0: embed each atom through the layer for its atomic number.
        for i in six.moves.range(V):
            vec[0][i] += self.model.g[anum[i]](add_var(
                np.array([[1]], dtype='float32')))
        # p[l][i]: one uniformly sampled neighbor of atom i for level l.
        p = [[] for i in six.moves.range(R)]
        for l in six.moves.range(R):
            p[l] = [
                to[i][np.random.randint(len(to[i]))]
                for i in six.moves.range(V)
            ]
            for i in six.moves.range(V):
                vec[l + 1][i] = F.tanh(self.model.H[l](vec[l][i] +
                                                       vec[l][p[l][i]]))

        # Project every level's vectors and sum them into the per-substance
        # fingerprint.
        tmp = [[] for i in six.moves.range(R + 1)]
        for l in six.moves.range(R + 1):
            for i in six.moves.range(V):
                tmp[l].append(F.softmax(self.model.W[l](vec[l][i])))
        for l in six.moves.range(R + 1):
            for i in six.moves.range(V):
                fp[atom_sid[i]] += tmp[l][i]

        return fp

    def update(self, sids, y, net, train=True):
        """Updates NFP NN.

        Args:
            sids (int[]): Substance ID.
            y (np.array(int32[])[2]): Activity data. y[0] is for
                    the training dataset and y[1] is for the test dataset.
            net (nn.NN): Classifier of QSAR.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            result (float): Overall accuracy on the test dataset.
        """

        self.model.zerograds()

        fps = self.get_nfp(sids[0] + sids[1], train)

        x_train = [fps[sid] for sid in sids[0]]
        x_test = [fps[sid] for sid in sids[1]]
        # Re-enable backprop through the generated fingerprints before they
        # are fed to the classifier.
        for x in x_train:
            x.volatile = 'off'
        for x in x_test:
            x.volatile = 'off'

        # NOTE(review): net.train() presumably runs backward() so gradients
        # flow into self.model through the fps graph — confirm against nn.NN.
        result = net.train(x_train, y[0], x_test, y[1], train, self.gpu)

        self.optimizer.update()
        return result
Code example #8
0
File: fp.py  Project: pfnet/nfp
class nfp(object):

    """NFP manager

    This class has the generator function of NFP and
    updator of NN for learning the generator of NFP.

    Args:
        d: Dimension of NFP.
        f: Dimension of the feature for generating NFP.
        R: Radius for generating NFP.
        gpu (boolean): GPU flag. If you want to use GPU, set it True.
    """
    def __init__(self, d, f, R, gpu):
        self.d = d
        self.f = f
        self.R = R
        self.gpu = gpu
        # g: one embedding layer per atomic number (indexed up to AtomIdMax).
        g = ChainList(*[L.Linear(1, f) for i in six.moves.range(AtomIdMax)])
        # H: one hidden transform per radius level; W: one output projection
        # per level (R + 1 levels, including the initial one).
        H = ChainList(*[L.Linear(f, f) for i in six.moves.range(R)])
        W = ChainList(*[L.Linear(f, d) for i in six.moves.range(R + 1)])
        self.optimizer = optimizers.Adam()
        self.model = Chain(H=H, W=W, g=g)
        if gpu:
            self.model.to_gpu(0)
        self.optimizer.setup(self.model)
        # Lazily-built molecule caches, one slot per split (0: train, 1: test):
        # to -> adjacency lists, atom_sid -> owning substance id per atom,
        # anum -> atomic number per atom.
        self.to = [[] for i in six.moves.range(2)]
        self.atom_sid = [[] for i in six.moves.range(2)]
        self.anum = [[] for i in six.moves.range(2)]

    def get_nfp(self, sids, train=True):
        """Generates NFP.

        Args:
            sids (int[]): List of substance IDs.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            fp: Dictionary of NFPs. Key is a substance ID.
        """

        d, f, R = self.d, self.f, self.R

        def add_var(x):
            # Wrap x in a Variable on the right device; volatile disables
            # graph construction when not training.
            if self.gpu:
                return Variable(cuda.to_gpu(x, 0), volatile=not train)
            else:
                return Variable(x, volatile=not train)

        # Select the per-split caches (0: train, 1: test).
        if train:
            ti = 0
        else:
            ti = 1
        to = self.to[ti]
        atom_sid = self.atom_sid[ti]
        anum = self.anum[ti]
        # First call for this split: load each molecule once and cache its
        # atoms' adjacency, owning substance id and atomic number.
        # NOTE(review): the cache is keyed only by split, so this assumes the
        # same ``sids`` are passed on every later call for that split — verify.
        if len(to) == 0:
            for sid in sids:
                mol = data.load_sdf(sid)
                atoms = mol.GetAtoms()
                n = len(atoms)
                base = len(to)
                to += [[] for i in six.moves.range(n)]
                atom_sid += [sid for i in six.moves.range(n)]
                anum += [1 for i in six.moves.range(n)]
                for atom in atoms:
                    anum[base + atom.GetIdx()] = atom.GetAtomicNum()
                    to[base + atom.GetIdx()] = [base + n_atom.GetIdx()
                                                for n_atom in atom.GetNeighbors()]
            # Give isolated atoms a self-loop so neighbor sampling never fails.
            for i in six.moves.range(len(to)):
                if len(to[i]) == 0:
                    to[i].append(i)

        V = len(atom_sid)
        # vec[l][i]: feature vector of atom i at radius level l.
        vec = [[] for i in six.moves.range(R + 1)]
        fp = {}
        for l in six.moves.range(R + 1):
            vec[l] = [add_var(np.zeros([1, f], dtype='float32'))
                      for i in six.moves.range(V)]
        for sid in sids:
            fp[sid] = add_var(np.zeros([1, d], dtype='float32'))
        # Level 0: embed each atom through the layer for its atomic number.
        for i in six.moves.range(V):
            vec[0][i] += self.model.g[anum[i]](add_var(np.array([[1]],
                                                                dtype='float32')))
        # p[l][i]: one uniformly sampled neighbor of atom i for level l.
        p = [[] for i in six.moves.range(R)]
        for l in six.moves.range(R):
            p[l] = [to[i][np.random.randint(len(to[i]))]
                    for i in six.moves.range(V)]
            for i in six.moves.range(V):
                vec[l + 1][i] = F.tanh(self.model.H[l]
                                       (vec[l][i] + vec[l][p[l][i]]))

        # Project every level's vectors and sum them into the per-substance
        # fingerprint.
        tmp = [[] for i in six.moves.range(R + 1)]
        for l in six.moves.range(R + 1):
            for i in six.moves.range(V):
                tmp[l].append(F.softmax(self.model.W[l](vec[l][i])))
        for l in six.moves.range(R + 1):
            for i in six.moves.range(V):
                fp[atom_sid[i]] += tmp[l][i]

        return fp

    def update(self, sids, y, net, train=True):
        """Updates NFP NN.

        Args:
            sids (int[]): Substance ID.
            y (np.array(int32[])[2]): Activity data. y[0] is for
                    the training dataset and y[1] is for the test dataset.
            net (nn.NN): Classifier of QSAR.
            train (boolean): Training flag. If you want to train
                    the NFP NN, set it True, otherwise False.

        Returns:
            result (float): Overall accuracy on the test dataset.
        """

        self.model.zerograds()

        fps = self.get_nfp(sids[0] + sids[1], train)

        x_train = [fps[sid] for sid in sids[0]]
        x_test = [fps[sid] for sid in sids[1]]
        # Re-enable backprop through the generated fingerprints before they
        # are fed to the classifier.
        for x in x_train:
            x.volatile = 'off'
        for x in x_test:
            x.volatile = 'off'

        # NOTE(review): net.train() presumably runs backward() so gradients
        # flow into self.model through the fps graph — confirm against nn.NN.
        result = net.train(x_train, y[0], x_test, y[1], train, self.gpu)

        self.optimizer.update()
        return result
Code example #9
0
class DA:
    """Denoising autoencoder trained with masking noise.

    Args:
        rng: NumPy RandomState used for noise masks and shuffling.
        data: Tuple ``(x_train, x_test)`` of input arrays.
        n_inputs: Number of units in the input and output layers.
        n_hidden: Number of units in the hidden layer.
        corruption_level: Fraction of inputs zeroed out during training.
        optimizer: Optimizer *class* (not instance) to use; default Adam.
        gpu: GPU device id; negative runs on CPU.
    """

    def __init__(self,
                 rng,
                 data,
                 n_inputs=784,
                 n_hidden=784,
                 corruption_level=0.05,
                 optimizer=optimizers.Adam,
                 gpu=-1):
        self.model = Chain(encoder=L.Linear(n_inputs, n_hidden),
                           decoder=L.Linear(n_hidden, n_inputs))

        if gpu >= 0:
            self.model.to_gpu()
            self.xp = cuda.cupy
        else:
            self.xp = np

        self.gpu = gpu

        self.x_train, self.x_test = data

        self.n_train = len(self.x_train)
        self.n_test = len(self.x_test)

        self.n_inputs = n_inputs
        self.n_hidden = n_hidden

        self.optimizer = optimizer()
        self.optimizer.setup(self.model)
        self.corruption_level = corruption_level
        self.rng = rng

    def forward(self, x_data, train=True):
        """Return the reconstruction loss for one mini-batch.

        The clean input is the target; a masked copy is what gets encoded.
        """
        y_data = x_data
        # Add masking noise (no-op when train=False or level is 0).
        x_data = self.get_corrupted_inputs(x_data, train=train)

        x, t = Variable(x_data.reshape(y_data.shape)), Variable(
            y_data.reshape(x_data.shape))

        # encode -> decode -> compare against the clean input.
        h = self.encode(x)
        y = self.decode(h)
        loss = F.mean_squared_error(y, t)
        return loss

    def compute_hidden(self, x_data):
        """Return the hidden-layer activations for *x_data*."""
        x = Variable(x_data)
        h = self.encode(x)
        return h.data

    def predict(self, x_data):
        """Return the reconstruction of *x_data* as a CPU array."""
        x = Variable(x_data)
        h = self.encode(x)
        y = self.decode(h)
        return cuda.to_cpu(y.data)

    def encode(self, x):
        return F.relu(self.model.encoder(x))

    def decode(self, h):
        return F.relu(self.model.decoder(h))

    def encoder(self):
        """Return a fresh Linear layer seeded with the trained encoder
        weights (used to initialize the next layer of a stacked AE)."""
        initialW = self.model.encoder.W
        initial_bias = self.model.encoder.b

        return L.Linear(self.n_inputs,
                        self.n_hidden,
                        initialW=initialW,
                        initial_bias=initial_bias)

    def decoder(self):
        return self.model.decoder

    def to_cpu(self):
        self.model.to_cpu()
        self.xp = np

    def to_gpu(self):
        """Move the model to GPU.

        Raises:
            RuntimeError: if the instance was constructed with ``gpu < 0``.
        """
        if self.gpu < 0:
            # Bug fix: the original printed a message and executed a bare
            # ``raise`` with no active exception, which itself fails with
            # "No active exception to re-raise". Raise explicitly instead.
            raise RuntimeError("DA was constructed with gpu < 0; "
                               "cannot move the model to GPU")
        self.model.to_gpu()
        self.xp = cuda.cupy

    # masking noise
    def get_corrupted_inputs(self, x_data, train=True):
        """Return *x_data* with a random binary mask applied (training only)."""
        if train and self.corruption_level != 0.0:
            mask = self.rng.binomial(size=x_data.shape,
                                     n=1,
                                     p=1.0 - self.corruption_level)
            mask = mask.astype(np.float32)
            mask = self.xp.asarray(mask)
            ret = mask * x_data
            return ret
        else:
            return x_data

    def train_and_test(self, n_epoch=5, batchsize=100):
        """Train with early stopping on test loss, keeping the best model.

        The best model is checkpointed to (and finally reloaded from)
        'dae.model'.
        """
        # Rolling window of recent test losses used for early stopping.
        self.save_accuracy = self.xp.tile([1000.0], 2000)
        self.best_loss = 1.0
        # Robustness fix: define best_epoch even if no epoch beats best_loss.
        self.best_epoch = 0

        for epoch in range(1, n_epoch + 1):
            print('epoch', epoch)

            perm = self.rng.permutation(self.n_train)
            sum_loss = 0
            for i in range(0, self.n_train, batchsize):
                x_batch = self.xp.asarray(self.x_train[perm[i:i + batchsize]])

                real_batchsize = len(x_batch)

                self.optimizer.zero_grads()
                loss = self.forward(x_batch)
                loss.backward()
                self.optimizer.update()

                sum_loss += float(loss.data) * real_batchsize

            print('train mean loss={}'.format(sum_loss / self.n_train))

            # evaluation
            sum_loss = 0
            for i in range(0, self.n_test, batchsize):
                x_batch = self.xp.asarray(self.x_test[i:i + batchsize])

                real_batchsize = len(x_batch)
                loss = self.forward(x_batch, train=False)

                sum_loss += float(loss.data) * real_batchsize

            print('test mean loss={}'.format(sum_loss / self.n_test))

            if (sum_loss / self.n_test) < self.best_loss:
                self.best_loss = sum_loss / self.n_test
                self.best_epoch = epoch
                serializers.save_hdf5('dae.model', self.model)
                print("update best loss")

            # Early stopping: stop once the current test loss exceeds the
            # mean of the recent-loss window.
            if self.xp.mean(self.save_accuracy) < sum_loss:
                print("early stopping done")
                break

            # Slide the window and append the latest test loss.
            self.save_accuracy = self.save_accuracy[1:]
            append = self.xp.array([float(sum_loss)])
            self.save_accuracy = self.xp.hstack((self.save_accuracy, append))

        print("best_epoch: %d" % (self.best_epoch))
        serializers.load_hdf5("dae.model", self.model)
Code example #10
0
class SDA:
    """Stacked denoising autoencoder: 5 DA-pretrained hidden layers + output.

    pre_train() greedily trains one denoising autoencoder (DA) per hidden
    layer, feeding each DA's hidden representation to the next, then copies
    each trained encoder into this model's l1..l5.  fine_tune() trains the
    full stack end-to-end on (data, target) with mean-squared-error loss,
    early stopping, and best-model snapshotting to 'mlp.model'.
    """

    def __init__(self,
                 rng,
                 data,
                 target,
                 n_inputs=784,
                 n_hidden=[784, 784, 784, 784, 784],
                 n_outputs=1,
                 corruption_levels=[0.1, 0.1, 0.1, 0.1, 0.1],
                 gpu=-1):
        # NOTE(review): n_hidden and corruption_levels are mutable default
        # arguments -- shared across instances if ever mutated in place.

        # Six-layer MLP: five hidden layers (to be overwritten by the
        # pretrained DA encoders) plus the output layer l6.
        self.model = Chain(l1=L.Linear(n_inputs, n_hidden[0]),
                           l2=L.Linear(n_hidden[0], n_hidden[1]),
                           l3=L.Linear(n_hidden[1], n_hidden[2]),
                           l4=L.Linear(n_hidden[2], n_hidden[3]),
                           l5=L.Linear(n_hidden[3], n_hidden[4]),
                           l6=L.Linear(n_hidden[4], n_outputs))

        # xp is the array module: cupy on GPU, numpy on CPU.
        if gpu >= 0:
            self.model.to_gpu()
            self.xp = cuda.cupy
        else:
            self.xp = np

        self.rng = rng
        self.gpu = gpu
        self.data = data
        self.target = target

        # data/target are (train, test) pairs.
        self.x_train, self.x_test = data
        self.y_train, self.y_test = target

        self.n_train = len(self.y_train)
        self.n_test = len(self.y_test)

        self.corruption_levels = corruption_levels
        self.n_inputs = n_inputs
        self.n_hidden = n_hidden
        self.n_outputs = n_outputs
        self.hidden_size = len(n_hidden)

        # One DA per hidden layer; populated by pre_train().
        self.dae1 = None
        self.dae2 = None
        self.dae3 = None
        self.dae4 = None
        self.dae5 = None
        self.optimizer = None
        self.setup_optimizer()

    def setup_optimizer(self):
        """(Re)create the Adam optimizer bound to the current model."""
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)

    def dae_train(self, rng, n_epoch, batchsize, dae_num, data, n_inputs,
                  n_hidden, corruption_level, gpu):
        """Train one denoising autoencoder and return it with its outputs.

        Returns (dae, next_inputs) where next_inputs is the [train, test]
        pair of hidden representations that feed the next DA in the stack.
        """
        # initialize
        dae = DA(rng=rng,
                 data=data,
                 n_inputs=n_inputs,
                 n_hidden=n_hidden,
                 corruption_level=corruption_level,
                 gpu=gpu)

        # train
        print "--------DA%d training has started!--------" % dae_num
        dae.train_and_test(n_epoch=n_epoch, batchsize=batchsize)
        # Move to CPU so compute_hidden runs on numpy arrays.
        dae.to_cpu()
        # compute outputs for next dAE
        tmp1 = dae.compute_hidden(data[0])
        tmp2 = dae.compute_hidden(data[1])
        if gpu >= 0:
            dae.to_gpu()
        next_inputs = [tmp1, tmp2]
        return dae, next_inputs

    def pre_train(self, n_epoch=20, batchsize=40, sda_name="SDA"):
        """Greedy layer-wise pretraining of all five DAs.

        After training, copies each DA encoder into model.l1..l5, rebinds
        the optimizer, and saves model/optimizer state to HDF5 files named
        after sda_name.
        """
        first_inputs = self.data
        n_epoch1 = n_epoch
        batchsize1 = batchsize

        # initialize first dAE
        self.dae1, second_inputs = self.dae_train(
            self.rng,
            n_epoch=n_epoch,
            batchsize=batchsize,
            dae_num=1,
            data=first_inputs,
            n_inputs=self.n_inputs,
            n_hidden=self.n_hidden[0],
            corruption_level=self.corruption_levels[0],
            gpu=self.gpu)

        self.dae2, third_inputs = self.dae_train(
            self.rng,
            n_epoch=int(n_epoch),
            batchsize=batchsize,
            dae_num=2,
            data=second_inputs,
            n_inputs=self.n_hidden[0],
            n_hidden=self.n_hidden[1],
            corruption_level=self.corruption_levels[1],
            gpu=self.gpu)

        self.dae3, forth_inputs = self.dae_train(
            self.rng,
            n_epoch=int(n_epoch),
            batchsize=batchsize,
            dae_num=3,
            data=third_inputs,
            n_inputs=self.n_hidden[1],
            n_hidden=self.n_hidden[2],
            corruption_level=self.corruption_levels[2],
            gpu=self.gpu)

        self.dae4, fifth_inputs = self.dae_train(
            self.rng,
            n_epoch=int(n_epoch),
            batchsize=batchsize,
            dae_num=4,
            data=forth_inputs,
            n_inputs=self.n_hidden[2],
            n_hidden=self.n_hidden[3],
            corruption_level=self.corruption_levels[3],
            gpu=self.gpu)

        self.dae5, sixth_inputs = self.dae_train(
            self.rng,
            n_epoch=int(n_epoch),
            batchsize=batchsize,
            dae_num=5,
            data=fifth_inputs,
            n_inputs=self.n_hidden[3],
            n_hidden=self.n_hidden[4],
            corruption_level=self.corruption_levels[4],
            gpu=self.gpu)

        # update model parameters
        self.model.l1 = self.dae1.model.encoder
        self.model.l2 = self.dae2.model.encoder
        self.model.l3 = self.dae3.model.encoder
        self.model.l4 = self.dae4.model.encoder
        self.model.l5 = self.dae5.model.encoder

        # Rebind the optimizer so it tracks the swapped-in encoder links.
        self.setup_optimizer()

        model_file = "%s.model" % sda_name
        state_file = "%s.state" % sda_name

        serializers.save_hdf5(model_file, self.model)
        serializers.save_hdf5(state_file, self.optimizer)

    def forward(self, x_data, y_data, train=True, output=False):
        """Forward pass through the stack.

        Returns the tanh output y when output=True, otherwise the MSE loss
        against y_data.  Dropout is active only when train=True.
        """
        x, t = Variable(x_data), Variable(y_data)
        h1 = F.dropout(F.relu(self.model.l1(x)), train=train)
        h2 = F.dropout(F.relu(self.model.l2(h1)), train=train)
        h3 = F.dropout(F.relu(self.model.l3(h2)), train=train)
        h4 = F.dropout(F.relu(self.model.l4(h3)), train=train)
        h5 = F.dropout(F.relu(self.model.l5(h4)), train=train)
        y = F.tanh(self.model.l6(h5))
        if output:
            return y
        else:
            return F.mean_squared_error(y, t)

    def fine_tune(self, n_epoch=20, batchsize=50):
        """Supervised end-to-end training of the pretrained stack.

        Returns (train_accs, test_accs): per-epoch mean train/test losses.
        Snapshots the best model to 'mlp.model' and reloads it at the end.
        """
        train_accs = []
        test_accs = []

        # Rolling window of recent test losses for early stopping; seeded
        # with a large sentinel (1000.0).
        self.save_accuracy = self.xp.tile([1000.0], 100)

        # initial best loss
        self.best_loss = 1000.0
        # NOTE(review): self.best_epoch is only set when the loss improves;
        # if it never drops below 1000.0 the final print raises NameError.

        for epoch in xrange(1, n_epoch + 1):
            print 'fine tuning epoch ', epoch

            # Shuffle training indices each epoch.
            perm = self.rng.permutation(self.n_train)
            sum_loss = 0
            for i in xrange(0, self.n_train, batchsize):
                x_batch = self.xp.asarray(self.x_train[perm[i:i + batchsize]])
                y_batch = self.xp.asarray(self.y_train[perm[i:i + batchsize]])

                # The last batch may be smaller than batchsize.
                real_batchsize = len(x_batch)

                self.optimizer.zero_grads()
                loss = self.forward(x_batch, y_batch)

                loss.backward()
                self.optimizer.update()

                sum_loss += float(cuda.to_cpu(loss.data)) * real_batchsize

            print 'fine tuning train mean loss={}'.format(sum_loss /
                                                          self.n_train)
            train_accs.append(sum_loss / self.n_train)

            # evaluation
            sum_loss = 0
            for i in xrange(0, self.n_test, batchsize):
                x_batch = self.xp.asarray(self.x_test[i:i + batchsize])
                y_batch = self.xp.asarray(self.y_test[i:i + batchsize])

                real_batchsize = len(x_batch)

                loss = self.forward(x_batch, y_batch, train=False)

                sum_loss += float(cuda.to_cpu(loss.data)) * real_batchsize

            print 'fine tuning test mean loss={}'.format(sum_loss /
                                                         self.n_test)
            test_accs.append(sum_loss / self.n_test)

            # NOTE(review): unlike DA.train_and_test, this compares the
            # TOTAL test loss (not the mean) against best_loss -- confirm
            # this is intentional.
            if sum_loss < self.best_loss:
                self.best_loss = sum_loss
                self.best_epoch = epoch
                serializers.save_hdf5('mlp.model', self.model)
                print("update best loss")

            # early stopping?
            if self.xp.mean(self.save_accuracy) < sum_loss:
                print("early stopping done")
                break

            # Slide the window: drop the oldest entry and append this
            # epoch's total test loss.
            self.save_accuracy = self.save_accuracy[1:]
            append = self.xp.array([float(sum_loss)])
            self.save_accuracy = self.xp.hstack((self.save_accuracy, append))

        print("best_epoch: %d" % (self.best_epoch))
        # Restore the best snapshot taken during fine-tuning.
        serializers.load_hdf5("mlp.model", self.model)

        return train_accs, test_accs
コード例 #11
0
ファイル: q_net.py プロジェクト: lyp741/Rainbow-Swarm
class QNet:
    """Deep Q-Network (CNN + MLP head) for multi-agent control.

    Maintains a per-agent circular replay buffer and trains the network
    with RMSpropGraves on minibatches sampled uniformly from it.
    NOTE(review): the same network computes both current and target
    Q-values in forward() -- there is no frozen target network.
    """

    # Hyper-Parameters
    gamma = 0.95  # Discount factor
    timestep_per_episode = 5000
    initial_exploration = timestep_per_episode * 1  # Initial exploration before replay starts. original: 5x10^4
    replay_size = 32  # Replay (batch) size
    hist_size = 2  # number of stacked history frames; original: 4
    data_index = 0  # next write position in the circular replay buffer
    data_flag = False  # True once the buffer has wrapped at least once
    loss_log = '../playground/Assets/log/'

    def __init__(self, use_gpu, enable_controller, cnn_input_dim, feature_dim,
                 agent_count, other_input_dim, model):
        """Build (or unpickle) the Q-network and allocate the replay buffer.

        use_gpu >= 0 selects a GPU device; enable_controller maps action
        indices to controller codes; model is 'None' to build fresh or a
        pickle file path to load.
        """
        self.use_gpu = use_gpu
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller
        self.cnn_input_dim = cnn_input_dim
        self.feature_dim = feature_dim
        self.agent_count = agent_count
        self.other_input_dim = other_input_dim
        self.data_size = self.timestep_per_episode
        self.loss_log_file = self.loss_log + "loss.log"
        self.loss_per_episode = 0
        self.time_of_episode = 0

        print("Initializing Q-Network...")

        if model == 'None':
            # 3 conv+batchnorm stages over stacked RGB history frames,
            # then a 4-layer MLP head producing one Q-value per action.
            self.model = Chain(
                conv1=L.Convolution2D(3 * self.hist_size, 32, 4, stride=2),
                bn1=L.BatchNormalization(32),
                conv2=L.Convolution2D(32, 32, 4, stride=2),
                bn2=L.BatchNormalization(32),
                conv3=L.Convolution2D(32, 32, 4, stride=2),
                bn3=L.BatchNormalization(32),
                #                 conv4=L.Convolution2D(64, 64, 4, stride=2),
                #                 bn4=L.BatchNormalization(64),
                l1=L.Linear(
                    self.feature_dim + self.other_input_dim * self.hist_size,
                    128),
                l2=L.Linear(128, 128),
                l3=L.Linear(128, 96),
                l4=L.Linear(96, 64),
                q_value=L.Linear(64, self.num_of_actions))
        else:
            # NOTE(review): pickle.load on an externally supplied file is
            # unsafe for untrusted input.
            with open(model, 'rb') as i:
                self.model = pickle.load(i)
                self.data_size = 0
        if self.use_gpu >= 0:
            self.model.to_gpu()

        self.optimizer = optimizers.RMSpropGraves()
        self.optimizer.setup(self.model)

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        # One slot per (agent, timestep); rewards are float32, the rest
        # uint8/bool.
        # NOTE(review): np.bool is removed in NumPy >= 1.24 -- use bool on
        # upgrade.
        self.d = [
            np.zeros((self.agent_count, self.data_size, self.hist_size, 128,
                      128, 3),
                     dtype=np.uint8),
            np.zeros((self.agent_count, self.data_size, self.hist_size,
                      self.other_input_dim),
                     dtype=np.uint8),
            np.zeros((self.agent_count, self.data_size), dtype=np.uint8),
            np.zeros((self.agent_count, self.data_size, 1), dtype=np.float32),
            np.zeros((self.agent_count, self.data_size, 1), dtype=np.bool)
        ]

    def _reshape_for_cnn(self, state, batch_size, hist_size, x, y):
        """Stack history frames along channels: (B,H,128,128,3) -> (B,3H,128,128)."""

        state_ = np.zeros((batch_size, 3 * hist_size, 128, 128),
                          dtype=np.float32)
        for i in range(batch_size):
            if self.hist_size == 1:
                state_[i] = state[i][0].transpose(2, 0, 1)
            elif self.hist_size == 2:
                state_[i] = np.c_[state[i][0], state[i][1]].transpose(2, 0, 1)
            elif self.hist_size == 4:
                state_[i] = np.c_[state[i][0], state[i][1], state[i][2],
                                  state[i][3]].transpose(2, 0, 1)

        return state_

    def forward(self, state_cnn, state_other, action, reward, state_cnn_dash,
                state_other_dash, episode_end):
        """Compute the DQN TD loss for a minibatch.

        Builds the target Q by replacing the taken action's entry with
        r + gamma * max_a' Q(s', a') (zeroed past episode end), and returns
        (mse_loss, q).
        """

        num_of_batch = state_cnn.shape[0]
        s_cnn = Variable(state_cnn)
        s_oth = Variable(state_other)
        s_cnn_dash = Variable(state_cnn_dash)
        s_oth_dash = Variable(state_other_dash)

        q = self.q_func(s_cnn, s_oth)  # Get Q-value

        # Max Q over actions for the next state (same network, no target net).
        max_q_dash_ = self.q_func(s_cnn_dash, s_oth_dash)
        if self.use_gpu >= 0:
            tmp = list(map(np.max, max_q_dash_.data.get()))
        else:
            tmp = list(map(np.max, max_q_dash_.data))
        max_q_dash = np.asanyarray(tmp, dtype=np.float32)
        # Start the target from the current Q so only the taken action's
        # entry contributes gradient.
        if self.use_gpu >= 0:
            target = np.array(q.data.get(), dtype=np.float32)
        else:
            target = np.array(q.data, dtype=np.float32)

        for i in range(num_of_batch):
            # Bellman target; (1 - episode_end) zeroes the bootstrap term
            # at terminal transitions.
            tmp_ = reward[i] + (1 -
                                episode_end[i]) * self.gamma * max_q_dash[i]

            action_index = self.action_to_index(action[i])
            target[i, action_index] = tmp_

        if self.use_gpu >= 0:
            loss = F.mean_squared_error(Variable(cuda.to_gpu(target)), q)
        else:
            loss = F.mean_squared_error(Variable(target), q)

        return loss, q

    def stock_experience(self, time, state_cnn, state_other, action, reward,
                         state_cnn_dash, state_other_dash, episode_end_flag):
        """Store one transition per agent at the current buffer index.

        The buffer is circular: data_index wraps at data_size and data_flag
        records that a full sweep happened.  Note s_dash is not stored; the
        successor state is recovered in experience_replay as index + 1.
        """

        for i in range(self.agent_count):
            self.d[0][i][self.data_index] = state_cnn[i].copy()
            self.d[1][i][self.data_index] = state_other[i].copy()
            self.d[2][i][self.data_index] = action[i].copy()
            self.d[3][i][self.data_index] = reward[i].copy()
            self.d[4][i][self.data_index] = episode_end_flag

        self.data_index += 1
        if self.data_index >= self.data_size:
            self.data_index -= self.data_size
            self.data_flag = True

    def experience_replay(self, time):
        """Sample a minibatch from the replay buffer and do one SGD step.

        No-op until `time` exceeds initial_exploration.  Appends the loss
        to the loss log file and accumulates per-episode loss statistics.
        """
        if self.initial_exploration < time:
            # Pick up replay_size number of samples from the Data
            replayRobotIndex = np.random.randint(0, self.agent_count,
                                                 self.replay_size)
            if not self.data_flag:  # during the first sweep of the History Data
                replay_index = np.random.randint(0, self.data_index,
                                                 self.replay_size)
            else:
                replay_index = np.random.randint(0, self.data_size,
                                                 self.replay_size)

            s_cnn_replay = np.ndarray(shape=(self.replay_size, self.hist_size,
                                             128, 128, 3),
                                      dtype=np.float32)
            s_oth_replay = np.ndarray(shape=(self.replay_size, self.hist_size,
                                             self.other_input_dim),
                                      dtype=np.float32)
            a_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.uint8)
            r_replay = np.ndarray(shape=(self.replay_size, 1),
                                  dtype=np.float32)
            s_cnn_dash_replay = np.ndarray(shape=(self.replay_size,
                                                  self.hist_size, 128, 128, 3),
                                           dtype=np.float32)
            s_oth_dash_replay = np.ndarray(shape=(self.replay_size,
                                                  self.hist_size,
                                                  self.other_input_dim),
                                           dtype=np.float32)
            episode_end_replay = np.ndarray(shape=(self.replay_size, 1),
                                            dtype=np.bool)

            for i in range(self.replay_size):
                s_cnn_replay[i] = np.asarray(
                    (self.d[0][replayRobotIndex[i]][replay_index[i]]),
                    dtype=np.float32)
                s_oth_replay[i] = np.asarray(
                    (self.d[1][replayRobotIndex[i]][replay_index[i]]),
                    dtype=np.float32)
                a_replay[i] = self.d[2][replayRobotIndex[i]][replay_index[i]]
                r_replay[i] = self.d[3][replayRobotIndex[i]][replay_index[i]]
                # Successor state = next buffer slot, wrapping at data_size.
                # NOTE(review): after a wrap the "next" slot may hold an
                # unrelated, much older transition -- confirm acceptable.
                if (replay_index[i] + 1 >= self.data_size):
                    s_cnn_dash_replay[i] = np.array(
                        (self.d[0][replayRobotIndex[i]][replay_index[i] + 1 -
                                                        self.data_size]),
                        dtype=np.float32)
                    s_oth_dash_replay[i] = np.array(
                        (self.d[1][replayRobotIndex[i]][replay_index[i] + 1 -
                                                        self.data_size]),
                        dtype=np.float32)
                else:
                    s_cnn_dash_replay[i] = np.array(
                        (self.d[0][replayRobotIndex[i]][replay_index[i] + 1]),
                        dtype=np.float32)
                    s_oth_dash_replay[i] = np.array(
                        (self.d[1][replayRobotIndex[i]][replay_index[i] + 1]),
                        dtype=np.float32)
                episode_end_replay[i] = self.d[4][replayRobotIndex[i]][
                    replay_index[i]]

            s_cnn_replay = self._reshape_for_cnn(s_cnn_replay,
                                                 self.replay_size,
                                                 self.hist_size, 128, 128)
            s_cnn_dash_replay = self._reshape_for_cnn(s_cnn_dash_replay,
                                                      self.replay_size,
                                                      self.hist_size, 128, 128)

            # Normalize uint8-ranged inputs to [0, 1].
            s_cnn_replay /= 255.0
            s_oth_replay /= 255.0
            s_cnn_dash_replay /= 255.0
            s_oth_dash_replay /= 255.0

            if self.use_gpu >= 0:
                s_cnn_replay = cuda.to_gpu(s_cnn_replay)
                s_oth_replay = cuda.to_gpu(s_oth_replay)
                s_cnn_dash_replay = cuda.to_gpu(s_cnn_dash_replay)
                s_oth_dash_replay = cuda.to_gpu(s_oth_dash_replay)

            # Gradient-based update
            loss, _ = self.forward(s_cnn_replay, s_oth_replay, a_replay,
                                   r_replay, s_cnn_dash_replay,
                                   s_oth_dash_replay, episode_end_replay)
            send_loss = loss.data
            with open(self.loss_log_file, 'a') as the_file:
                the_file.write(str(time) + "," + str(send_loss) + "\n")
            self.loss_per_episode += loss.data
            self.time_of_episode += 1
            self.model.zerograds()
            loss.backward()
            self.optimizer.update()

    def q_func(self, state_cnn, state_other):
        """Q(s, a) for all actions: conv features concat other inputs -> MLP."""
        if self.use_gpu >= 0:
            num_of_batch = state_cnn.data.get().shape[0]
        else:
            num_of_batch = state_cnn.data.shape[0]

        # tanh-activated conv+batchnorm feature extractor.
        h1 = F.tanh(self.model.bn1(self.model.conv1(state_cnn)))
        h2 = F.tanh(self.model.bn2(self.model.conv2(h1)))
        h3 = F.tanh(self.model.bn3(self.model.conv3(h2)))
        #         h4 = F.tanh(self.model.bn4(self.model.conv4(h3)))
        #         h5 = F.tanh(self.model.bn5(self.model.conv5(h4)))

        # Flatten conv output to feature_dim and append the flattened
        # non-image observation history.
        h4_ = F.concat(
            (F.reshape(h3, (num_of_batch, self.feature_dim)),
             F.reshape(state_other,
                       (num_of_batch, self.other_input_dim * self.hist_size))),
            axis=1)

        h6 = F.relu(self.model.l1(h4_))
        h7 = F.relu(self.model.l2(h6))
        h8 = F.relu(self.model.l3(h7))
        h9 = F.relu(self.model.l4(h8))
        q = self.model.q_value(h9)
        return q

    def e_greedy(self, state_cnn, state_other, epsilon, reward):
        """Per-agent epsilon-greedy action selection.

        Returns (controller_codes, q) where q is the raw Q-value array
        (still on GPU when use_gpu >= 0).
        """
        s_cnn = Variable(state_cnn)
        s_oth = Variable(state_other)
        q = self.q_func(s_cnn, s_oth)
        q = q.data
        if self.use_gpu >= 0:
            q_ = q.get()
        else:
            q_ = q

        index_action = np.zeros((self.agent_count), dtype=np.uint8)

        print(("agent"), end=' ')
        for i in range(self.agent_count):
            # Explore with probability epsilon, otherwise exploit argmax Q.
            if np.random.rand() < epsilon:
                index_action[i] = np.random.randint(0, self.num_of_actions)
                print(("[%02d] Random(%2d)reward(%06.2f)" %
                       (i, index_action[i], reward[i])),
                      end=' ')
            else:
                index_action[i] = np.argmax(q_[i])
                print(("[%02d]!Greedy(%2d)reward(%06.2f)" %
                       (i, index_action[i], reward[i])),
                      end=' ')
            # Line-wrap the per-agent log every 5 agents.
            if i % 5 == 4:
                print(("\n     "), end=' ')

        del q_

        return self.index_to_action(index_action), q

    def index_to_action(self, index_of_action):
        """Map network action indices to controller codes, per agent."""
        index = np.zeros((self.agent_count), dtype=np.uint8)
        for i in range(self.agent_count):
            index[i] = self.enable_controller[index_of_action[i]]
        return index

    def action_to_index(self, action):
        """Inverse of index_to_action for a single controller code."""
        return self.enable_controller.index(action)
コード例 #12
0
ファイル: mnist.py プロジェクト: masenov/bullet-nn
# MNIST driver script: sets up optimizers for model_0 and model_1 and moves
# both models to the GPU.
# FIX: removed a leftover `import pdb; pdb.set_trace()` debugger breakpoint
# that unconditionally halted the script right after loading the dataset.
train, test = datasets.get_mnist(ndim=2)
# Setup Optimizer
optimizer = optimizers.Adam()
optimizer.setup(model_0)

# mnist_train_0(train,test,5)
# mnist_train_0_batch(train,test, nb_epochs = 1)  ## Takes a very long time

train, test = datasets.get_mnist(ndim=3)  # Return the channel dimension too
# Send the model to the gpu
gpu_id = 0
cuda.get_device(gpu_id).use()
model_0.to_gpu()
# mnist_train_0_gpu(train,test, nb_epochs=5)

stats = model_statistics(model_0)
print("Number of parameters: ", stats[0], "Memory usage in MiB:", stats[1])

model_1 = SimpleCNN0()
model_1.name = "SimpleCNN0"

## Setup optimizer
optim = optimizers.Adam()
optim.setup(model_1)
# NOTE(review): use_cleargrads() is deprecated in modern Chainer (cleargrads
# is the default) -- harmless here, kept for compatibility.
optim.use_cleargrads()
## Send to GPU
model_1.to_gpu()
コード例 #13
0
class SoftSeqKmeans():
    """Soft (differentiable) k-means over one-hot-encoded sequences.

    Centroids live in a SoftKMeansLayer wrapped in a chainer Chain and are
    fitted by stochastic gradient steps on randomly sampled minibatches.
    """

    def __init__(self,
                 n_centroid,
                 centroid_length,
                 alphabet,
                 use_gpu=True,
                 tau=2):
        # Model and optimizer are created lazily in fit().
        self.model = None
        self.optimizer = None
        self.centroid_length = centroid_length
        self.n_centroid = n_centroid
        self.tau = tau
        self.use_gpu = use_gpu
        self.alphabet = alphabet
        # Map each symbol to its one-hot channel index.
        self.dict_alphabet = {alphabet[i]: i for i in range(len(alphabet))}
        self.max_length = None  # set in fit() from the longest input sequence

    def fit(self,
            X,
            batchsize=100,
            n_iter=100,
            init_smooth=0.8,
            init_scale=0.1,
            lr=0.01,
            optimizer='Momentum'):
        """Fit centroids on sequences X; returns the loss curve as an array.

        Centroids are initialized from n_centroid distinct input sequences
        of exactly centroid_length, smoothed, log-transformed with Gumbel
        noise, scaled by init_scale, and centered over the position axis.
        """
        L = np.array([len(seq) for seq in X])
        self.max_length = np.max(L)

        # Initialize from distinct sequences of exactly centroid_length.
        init = X[np.where(L == self.centroid_length)[0]]
        init = np.unique(init)
        init = init[np.random.choice(len(init), self.n_centroid,
                                     replace=False)]
        print(init)
        init_seq = one_hot_encoding(init, self.dict_alphabet, self.max_length,
                                    init_smooth)
        # Log-transform nonzero entries and perturb with Gumbel noise.
        init_seq[np.where(init_seq != 0)] = np.log(
            init_seq[np.where(init_seq != 0)])
        noise = np.random.gumbel(0, 1, init_seq.shape)
        init_seq[np.where(init_seq != 0)] += noise[np.where(init_seq != 0)]
        init_seq *= init_scale
        # Center each position over the length axis.
        init_seq = np.transpose(
            np.transpose(init_seq, (1, 0, 2)) - np.mean(init_seq, axis=1),
            (1, 0, 2))

        self.model = Chain(kmeans=SoftKMeansLayer(self.n_centroid,
                                                  self.centroid_length,
                                                  init_W=init_seq,
                                                  tau1=self.tau))

        self.optimizer = {
            'Adam': optimizers.Adam(lr),
            'Momentum': optimizers.MomentumSGD(lr),
            'SGD': optimizers.SGD(lr)
        }[optimizer]

        self.optimizer.setup(self.model)
        self.optimizer.add_hook(chainer.optimizer.WeightDecay(1e-6))
        if self.use_gpu:
            self.model.to_gpu()

        with chainer.using_config('train', True):
            lcurve = []
            for i in range(n_iter):
                self.model.cleargrads()
                indexes = np.random.choice(len(X), batchsize)
                x = X[indexes]
                x = one_hot_encoding(x, self.dict_alphabet, self.max_length)
                if self.use_gpu:
                    x = cupy.array(x)
                # BUG FIX: x is already the sampled minibatch of size
                # `batchsize`; the original passed x[indexes], re-indexing
                # the batch with indices drawn from range(len(X)) and
                # reading out of bounds / wrong rows.
                loss = self.model.kmeans(x)
                loss.backward()
                lcurve.append(float(loss.data))
                self.optimizer.update()
                # Running mean of the last 10 losses for progress logging.
                print(i, np.mean(lcurve[-10:]))

        return np.array(lcurve)

    def transform(self, X, batchsize=1000):
        """Assign each sequence in X to its nearest centroid; returns labels."""
        labels = []
        with chainer.using_config('train', False):
            with chainer.no_backprop_mode():
                for i in range(0, len(X), batchsize):
                    print(i)
                    x = X[i:i + batchsize]
                    x = one_hot_encoding(x, self.dict_alphabet,
                                         self.max_length)
                    if self.use_gpu:
                        x = cupy.array(x)
                    loss, indexes = self.model.kmeans(x, inference=True)
                    labels.append(indexes)
        return np.concatenate(labels)

    def get_centroid(self):
        """Return the fitted centroids as a numpy array (copied off GPU)."""
        return cupy.asnumpy(self.model.kmeans.get_centroid())
コード例 #14
0
class SeqKmeans():
    """Hard k-means over one-hot-encoded sequences (chainer-based).

    Centroids live in a KMeansLayer wrapped in a chainer Chain; fit()
    alternates hard assignment of a minibatch with several gradient steps
    on cluster-balanced sub-batches.
    """

    def __init__(self,
                 n_centroid,
                 centroid_length,
                 alphabet,
                 use_gpu=True,
                 tau=2):
        # Model and optimizer are created lazily in fit().
        self.model = None
        self.optimizer = None
        self.centroid_length = centroid_length
        self.n_centroid = n_centroid
        self.tau = tau
        self.use_gpu = use_gpu
        self.alphabet = alphabet
        # Map each symbol to its one-hot channel index.
        self.dict_alphabet = {alphabet[i]: i for i in range(len(alphabet))}
        self.max_length = None  # set in fit() from the longest input sequence

    def get_initialize_points(self, X, smooth, n_centroid):
        """k-means++-style initialization: pick spread-out seed sequences.

        Computes pairwise edit distances between the one-hot encodings of X
        and samples each new seed with probability proportional to its
        distance from the already chosen seeds.
        """
        X = cupy.array(one_hot_encoding(X, self.dict_alphabet, self.max_length,
                                        smooth),
                       dtype=np.float32)
        # All (i, j) index pairs for the full pairwise distance matrix.
        I = np.ravel(np.broadcast_to(np.arange(len(X)), (len(X), len(X))).T)
        J = np.ravel(np.broadcast_to(np.arange(len(X)), (len(X), len(X))))
        d = edit_distance(X[I], X[J]).reshape((len(X), len(X)))
        d = cupy.asnumpy(d)
        out = [random.randint(0, len(X) - 1)]
        for i in range(n_centroid - 1):
            min_d = np.min(d[:, out], axis=1)
            # BUG FIX: the original used np.random.choice(..., 1, p=...),
            # which returns a length-1 ndarray; appending that produced a
            # mixed int/array index list that breaks fancy indexing on
            # modern NumPy.  Draw a scalar instead (same distribution).
            new_point = np.random.choice(len(min_d),
                                         p=min_d / np.sum(min_d))
            out.append(int(new_point))
        return cupy.asnumpy(X)[out, :, :]

    def fit(self,
            X,
            mini_batch=1000,
            subsample_batch=100,
            n_iter=100,
            step_per_iter=10,
            init_smooth=0.8,
            init_scale=0.1,
            lr=0.1,
            optimizer='SGD'):
        """Fit centroids on sequences X; returns the loss curve as an array."""
        L = np.array([len(seq) for seq in X])
        self.max_length = np.max(L)

        # Initialize from distinct sequences of exactly centroid_length,
        # capped at 100x the number of centroids to bound the O(n^2)
        # distance computation in get_initialize_points.
        init = X[np.where(L == self.centroid_length)[0]]
        init = np.unique(init)
        if len(init) > self.n_centroid * 100:
            init = init[np.random.choice(len(init),
                                         self.n_centroid * 100,
                                         replace=False)]

        init_seq = self.get_initialize_points(init, init_smooth,
                                              self.n_centroid)
        """init_seq = one_hot_encoding(init, self.dict_alphabet, self.max_length, init_smooth)
        init_seq[np.where(init_seq != 0)] = np.log(init_seq[np.where(init_seq != 0)])
        noise = np.random.gumbel(0, 1, init_seq.shape)
        init_seq[np.where(init_seq != 0)] += noise[np.where(init_seq != 0)]
        init_seq *= init_scale"""
        # Center each position over the length axis.
        init_seq = np.transpose(
            np.transpose(init_seq, (1, 0, 2)) - np.mean(init_seq, axis=1),
            (1, 0, 2))

        self.model = Chain(kmeans=KMeansLayer(self.n_centroid,
                                              self.centroid_length,
                                              init_W=init_seq,
                                              tau=self.tau))
        self.optimizer = {
            'Adam': optimizers.Adam(lr),
            'Momentum': optimizers.MomentumSGD(lr),
            'SGD': optimizers.SGD(lr)
        }[optimizer]
        self.optimizer.setup(self.model)
        self.optimizer.add_hook(chainer.optimizer.WeightDecay(1e-6))
        if self.use_gpu:
            self.model.to_gpu()

        with chainer.using_config('train', True):
            lcurve = []
            for i in range(n_iter):
                self.model.cleargrads()
                # Hard-assign a random minibatch to clusters (no gradients).
                indexes = np.random.choice(len(X), mini_batch)
                x = X[indexes]
                x = one_hot_encoding(x, self.dict_alphabet, self.max_length)
                if self.use_gpu:
                    x = cupy.array(x)
                with chainer.no_backprop_mode():
                    _, labels = self.model.kmeans(x, inference=True)
                # Group minibatch rows by assigned cluster.
                labels_indexes = [
                    np.where(labels == u)[0] for u in np.unique(labels)
                ]
                for j in range(step_per_iter):
                    # Balanced sub-batch: equal rows from each cluster.
                    indexes = []
                    for row in labels_indexes:
                        indexes += np.random.choice(
                            row,
                            subsample_batch // len(labels_indexes)).tolist()

                    loss = self.model.kmeans(x[indexes],
                                             indexes=labels[indexes])
                    loss = F.mean(loss)
                    loss.backward()
                    lcurve.append(float(loss.data))
                    self.optimizer.update()
                    # Running mean of the last 10 losses for progress logging.
                    print(i, j, np.mean(lcurve[-10:]))

        return np.array(lcurve)

    def transform(self, X, batchsize=1000):
        """Assign each sequence in X to its nearest centroid; returns labels."""
        labels = []
        with chainer.using_config('train', False):
            with chainer.no_backprop_mode():
                for i in range(0, len(X), batchsize):
                    print(i)
                    x = X[i:i + batchsize]
                    x = one_hot_encoding(x, self.dict_alphabet,
                                         self.max_length)
                    if self.use_gpu:
                        x = cupy.array(x)
                    loss, indexes = self.model.kmeans(x, inference=True)
                    labels.append(indexes)
        return np.concatenate(labels)

    def get_centroid(self):
        """Return the fitted centroids as a numpy array (copied off GPU)."""
        return cupy.asnumpy(self.model.kmeans.get_centroid())