Example #1
 def __init__(self,
              name,
              observation_space_shape,
              num_actions,
              pretrained_policy=None,
              *args,
              **kwargs):
     self.name = name
     self.observation_space_shape = observation_space_shape
     self.num_actions = num_actions
     self._build_network(pretrained_policy)
     self.trainer = Trainer(self.q, self.loss,
                            [sgd(self.q.parameters, lr=5e-4)])
Example #2
 def __init__(self,
              name,
              observation_space_shape,
              num_actions,
              pretrained_policy=None,
              *args,
              **kwargs):
     self.name = name
     self.observation_space_shape = observation_space_shape
     self.num_actions = num_actions
     self._build_network(pretrained_policy)
     self.trainer = Trainer(
         self.value, self.loss,
         [adam(self.value.parameters, lr=0.001, momentum=0.9)])
Example #3
 def __init__(self,
              name,
              num_frames_to_stack,
              observation_space_shape,
              num_actions,
              pretrained_policy=None,
              *args,
              **kwargs):
     self.name = name
     self.num_frames_to_stack = num_frames_to_stack
     self.observation_space_shape = observation_space_shape
     self.num_actions = num_actions
     self._build_network(pretrained_policy)
     self.trainer = Trainer(
         self.log_probability, self.loss,
         [adam(self.probabilities.parameters, lr=0.00001, momentum=0.9)])
Example #4
 def __init__(self,
              name,
              num_frames_to_stack,
              observation_space_shape,
              num_actions,
              pretrained_policy=None,
              *args,
              **kwargs):
     self.name = name
     self.num_frames_to_stack = num_frames_to_stack
     self.observation_space_shape = observation_space_shape
     self.frame_stacker = FrameStacker(stack_size=num_frames_to_stack,
                                       frame_shape=observation_space_shape)
     self.num_actions = num_actions
     self._build_network(pretrained_policy)
     self.trainer = Trainer(self.q, self.loss,
                            [sgd(self.q.parameters, lr=0.000001)])
Example #5
    def __init__(self,
                 in_shape,
                 output_shape,
                 device_id=None,
                 learning_rate=0.00025,
                 momentum=0.9,
                 minibatch_size=32,
                 update_interval=10000,
                 n_workers=1,
                 visualizer=None):
        """
        Q Neural Network following the implementation and default options of Mnih et al.

        The network has the following topology:
        Convolution(32, (8, 8))
        Convolution(64, (4, 4))
        Convolution(64, (2, 2))
        Dense(512)

        :param in_shape: Shape of the observations perceived by the learner (the neural net input)
        :param output_shape: Size of the action space (mapped to the number of output neurons)

        :param device_id: Use None to let CNTK select the best available device,
                          -1 for CPU, >= 0 for GPU
                          (default: None)

        :param learning_rate: Learning rate
                              (default: 0.00025, as per Mnih et al.)

        :param momentum: Momentum value used when averaging gradients
                         (applied with unit gain, matching unit_gain=True below).
                         Note that CNTK does not currently provide an
                         implementation of Graves' RMSProp with momentum,
                         so the Adam optimizer is used instead.
                         (default: 0.9)

        :param minibatch_size: Minibatch size
                               (default: 32, as per Mnih et al.)

        :param update_interval: Number of steps between two consecutive updates
                                of the frozen target network
                                (default: 10000)

        :param n_workers: Number of concurrent workers for distributed training.
                          (default: 1, not distributed)

        :param visualizer: Optional visualizer allowing the model to save summary data
                           (default: None, no visualization)

        Ref: Mnih et al.: "Human-level control through deep reinforcement learning."
        Nature 518.7540 (2015): 529-533.
        """

        assert learning_rate > 0, 'learning_rate should be > 0'
        assert 0. <= momentum < 1, 'momentum should be 0 <= momentum < 1'

        QModel.__init__(self, in_shape, output_shape)
        CntkModel.__init__(self, device_id, False, n_workers, visualizer)

        self._nb_actions = output_shape
        self._steps = 0
        self._target_update_interval = update_interval
        self._target = None

        # Input vars
        self._environment = input(in_shape,
                                  name='env',
                                  dynamic_axes=[Axis.default_batch_axis()])
        self._q_targets = input(1,
                                name='q_targets',
                                dynamic_axes=[Axis.default_batch_axis()])
        self._actions = input(output_shape,
                              name='actions',
                              dynamic_axes=[Axis.default_batch_axis()])

        # Define the neural network graph
        self._model = self._build_model()(self._environment)
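        # Frozen clone of the online network, used to compute stable Q-targets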
        self._target = self._model.clone(
            CloneMethod.freeze, {self._environment: self._environment})

        # Define the learning rate
        lr_schedule = learning_rate_schedule(learning_rate, UnitType.minibatch)

        # Adam optimizer
        m_schedule = momentum_schedule(momentum)
        vm_schedule = momentum_schedule(0.999)
        l_sgd = adam(self._model.parameters,
                     lr_schedule,
                     momentum=m_schedule,
                     unit_gain=True,
                     variance_momentum=vm_schedule)

        if self.distributed_training:
            raise NotImplementedError('ASGD not implemented yet.')

        # _actions is a sparse 1-hot encoding of the actions done by the agent
        q_acted = reduce_sum(self._model * self._actions, axis=0)

        # Huber loss criterion on the Q-values of the actions actually taken
        criterion = huber_loss(q_acted, self._q_targets, 1.0)

        self._learner = l_sgd
        self._trainer = Trainer(self._model, (criterion, None), l_sgd)
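
The _build_model helper called above is not shown in this example. A minimal
sketch of what it could look like, based only on the topology listed in the
docstring (the strides and ReLU activations are assumptions borrowed from
Mnih et al., and the final Dense layer mapping to the action space is implied
by the Q-learning setup):

    def _build_model(self):
        from cntk.layers import Convolution2D, Dense, Sequential
        from cntk.ops import relu

        # Topology as listed in the docstring; strides are an assumption
        return Sequential([
            Convolution2D((8, 8), 32, strides=4, activation=relu),
            Convolution2D((4, 4), 64, strides=2, activation=relu),
            Convolution2D((2, 2), 64, strides=1, activation=relu),
            Dense(512, activation=relu),
            Dense(self._nb_actions)  # linear output: one Q-value per action
        ])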
Example #6
from cntk import Trainer, classification_error
from cntk.learners import adadelta, learning_rate_schedule, UnitType
from cntk.logging import ProgressPrinter
from cntk.train import training_session

classificationError = classification_error(outputLayer, labelsShape)

input_map = {
    featuresShape: reader.streams.features,
    labelsShape: reader.streams.labels
}

numOfEpochs = 10

printer = [ProgressPrinter(tag='Training', num_epochs=numOfEpochs)]

# Each learning rate is used for 700 samples: 0.1, then 0.01, then 0.001
learningRate = learning_rate_schedule([0.1, 0.01, 0.001], UnitType.sample, 700)

trainer = Trainer(outputLayer,
                  (crossEntropy, classificationError),
                  [adadelta(outputLayer.parameters, learningRate)],
                  printer)

minibatchSize = 50
numberOfSamples = 2208
numberOfSweepsForTraining = 10

trainingSession = training_session(
        trainer=trainer,
        mb_source=reader,
        mb_size=minibatchSize,
        model_inputs_to_streams=input_map,
        max_samples=numberOfSamples * numberOfSweepsForTraining,
        progress_frequency=numberOfSamples
    )

trainingSession.train()
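
The snippet references featuresShape, labelsShape, outputLayer, crossEntropy
and reader without defining them. A minimal sketch of definitions that would
make it runnable, assuming a CTF-format data file and a placeholder one-layer
model (the dimensions, file name and model are illustrative, not taken from
the original):

import cntk as C
from cntk.layers import Dense
from cntk.io import MinibatchSource, CTFDeserializer, StreamDefs, StreamDef

inputDim = 784       # placeholder: flattened input size
numClasses = 10      # placeholder: size of the label space

featuresShape = C.input_variable(inputDim, name='features')
labelsShape = C.input_variable(numClasses, name='labels')

# Placeholder model: a single dense layer scoring each class
outputLayer = Dense(numClasses)(featuresShape)
crossEntropy = C.cross_entropy_with_softmax(outputLayer, labelsShape)

# Reader over a CTF file exposing 'features' and 'labels' streams
reader = MinibatchSource(CTFDeserializer('train.ctf', StreamDefs(
    features=StreamDef(field='features', shape=inputDim),
    labels=StreamDef(field='labels', shape=numClasses, is_sparse=True))))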