def train(self, epoch=10, batch_size=32, gpu=False):
    """Train the Alex CNN to classify note frames into tap/arrow labels.

    Builds the dataset from the module-level paths, trains with
    MomentumSGD over shuffled minibatches, checkpoints the model to
    MODEL_PATH each epoch, and decays the learning rate by 3% per epoch.

    Args:
        epoch: Number of passes over the training set.
        batch_size: Minibatch size; also stored on ``self.batch_size``.
        gpu: When True, verify CUDA is available and run on the GPU.

    Side effects:
        Sets ``self.model``, ``self.batch_size`` and the four
        ``self.x_train/x_test/y_train/y_test`` arrays; writes the model
        snapshot to MODEL_PATH after every epoch.
    """
    if gpu:
        cuda.check_cuda_available()
    # Array module matching the target device (cupy on GPU, numpy on CPU).
    xp = cuda.cupy if gpu else np
    self.batch_size = batch_size
    label_types = ['none', 'tap', 'up', 'down', 'right', 'left']
    self.model = Alex(len(label_types))
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(self.model)
    if gpu:
        self.model.to_gpu()
    training_data = TrainingData(IMAGE_ROOT, NOTE_ROOT, VIDEO_ROOT, SONG_LIST_PATH)
    self.x_train, self.x_test, self.y_train, self.y_test = training_data.get_train_data(label_types)
    data_size = self.x_train.shape[0]
    for ep in range(epoch):
        print('epoch {0}/{1}: (learning rate={2})'.format(ep + 1, epoch, optimizer.lr))
        # Fresh shuffle of sample indices every epoch.
        indexes = np.random.permutation(data_size)
        for i in range(0, data_size, self.batch_size):
            # BUG FIX: move each minibatch onto the active device. The
            # original computed ``xp`` but never used it, so GPU runs fed
            # host NumPy arrays to a model living on the GPU.
            x_batch = xp.asarray(self.x_train[indexes[i:i + self.batch_size]])
            y_batch = xp.asarray(self.y_train[indexes[i:i + self.batch_size]])
            x = chainer.Variable(x_batch)
            t = chainer.Variable(y_batch)
            # Classifier-style model: forward + loss + backward + step.
            optimizer.update(self.model, x, t)
        # NOTE(review): original indentation was lost; loss report,
        # checkpoint, and lr decay are placed per-epoch (the conventional
        # structure) — confirm against the original file.
        print("loss: {0}".format(self.model.loss.data))
        serializers.save_npz(MODEL_PATH, self.model)
        optimizer.lr *= 0.97
def main():
    # Entry point: build the dataset pipeline from the module-level paths
    # and split it into train/test arrays for the six note labels.
    training_data = TrainingData(IMAGE_ROOT, NOTE_ROOT, VIDEO_ROOT, SONG_LIST_PATH)
    # NOTE(review): the unpacked arrays are unused in the visible portion —
    # this chunk may be truncated mid-function; confirm the remainder of
    # main() against the full file before relying on this.
    x_train, x_test, y_train, y_test = training_data.get_train_data(['none', 'tap', 'up', 'down', 'right', 'left'])