def init_params(self):
    '''
    Initializes all of the model's parameters using xavier uniform initialization.
    Biases are all set to 0.01, except for the GRU's biases, which are set to 0.
    '''
    net_util.init_layers(self.layers, 'Linear')
    net_util.init_layers(self.layers, 'GRU')
def __init__(self, net_spec, in_dim, out_dim):
    '''
    net_spec:
    hid_layers: list containing dimensions of the hidden layers
    hid_layers_activation: activation function for the hidden layers
    init_fn: weight initialization function
    clip_grad_val: clip gradient norm if value is not None
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_scheduler_spec: PyTorch optim.lr_scheduler
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing
    '''
    nn.Module.__init__(self)
    super(MLPNet, self).__init__(net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        init_fn=None,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'shared',
        'hid_layers',
        'hid_layers_activation',
        'init_fn',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    dims = [self.in_dim] + self.hid_layers
    self.model = net_util.build_fc_model(dims, self.hid_layers_activation)
    # add last layer with no activation
    # tails. avoid list for single-tail for compute speed
    if ps.is_integer(self.out_dim):
        self.model_tail = nn.Linear(dims[-1], self.out_dim)
    else:
        self.model_tails = nn.ModuleList([nn.Linear(dims[-1], out_d) for out_d in self.out_dim])
    net_util.init_layers(self, self.init_fn)
    for module in self.modules():
        module.to(self.device)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_scheduler = net_util.get_lr_scheduler(self, self.lr_scheduler_spec)
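# Illustrative only: a minimal net_spec dict for the MLPNet above, with
# hypothetical values (layer sizes, lr) not taken from the source. The keys
# mirror the docstring and the util.set_attr list.
example_mlp_net_spec = {
    'type': 'MLPNet',
    'shared': True,
    'hid_layers': [64, 64],
    'hid_layers_activation': 'relu',
    'init_fn': 'xavier_uniform_',
    'clip_grad_val': 0.5,
    'loss_spec': {'name': 'MSELoss'},
    'optim_spec': {'name': 'Adam', 'lr': 0.001},
    'lr_scheduler_spec': None,
    'update_type': 'replace',
    'update_frequency': 1,
    'polyak_coef': 0.0,
    'gpu': False,
}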
def init_params(self):
    '''
    Initializes all of the model's parameters using xavier uniform initialization.
    Note: there appears to be unreproducible behavior in PyTorch for xavier init.
    Sometimes the trainable-params tests pass (see nn_test.py), other times they don't.
    Biases are all set to 0.01
    '''
    net_util.init_layers(self.layers, 'Linear')
def __init__(self, net_spec, in_dim, out_dim):
    state_dim, action_dim = in_dim
    assert len(state_dim) == 3  # image shape (c,w,h)
    # conv body
    nn.Module.__init__(self)
    Net.__init__(self, net_spec, state_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        out_layer_activation=None,
        init_fn=None,
        normalize=False,
        batch_norm=True,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'conv_hid_layers',
        'fc_hid_layers',
        'hid_layers_activation',
        'out_layer_activation',
        'init_fn',
        'normalize',
        'batch_norm',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # state conv model
    self.conv_model = self.build_conv_layers(self.conv_hid_layers)
    self.conv_out_dim = self.get_conv_output_size()
    # state fc model
    self.fc_model = net_util.build_fc_model([self.conv_out_dim + action_dim] + self.fc_hid_layers, self.hid_layers_activation)
    # tail: affine transformation applied to the fc output
    tail_in_dim = self.fc_hid_layers[-1]
    self.model_tail = net_util.build_fc_model([tail_in_dim, self.out_dim], self.out_layer_activation)
    net_util.init_layers(self, self.init_fn)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.to(self.device)
    self.train()
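# Sketch (an assumption, not the source's forward implementation): how a
# forward pass of this state-action Q-network could combine the conv features
# with the action vector, given the modules defined in __init__ above.
import torch

def forward_sketch(self, state, action):
    if self.normalize:
        state = state / 255.0  # rescale raw pixel input
    x = self.conv_model(state)  # (batch, c, h, w) -> conv feature maps
    x = x.view(x.size(0), -1)  # flatten to (batch, conv_out_dim)
    x = torch.cat((x, action), dim=-1)  # (batch, conv_out_dim + action_dim)
    x = self.fc_model(x)
    return self.model_tail(x)  # Q(s, a) estimate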
def init_params(self):
    '''
    Initializes all of the model's parameters using xavier uniform initialization.
    Biases are all set to 0.01
    '''
    layers = []
    for l in self.state_heads_layers:
        layers.extend(l)
    layers.extend(self.shared_layers)
    for l in self.action_heads_layers:
        layers.extend(l)
    net_util.init_layers(layers, 'Linear')
def __init__(self, net_spec, algorithm, in_dim, out_dim):
    nn.Module.__init__(self)
    Net.__init__(self, net_spec, algorithm, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'lr_anneal_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # Guard against inappropriate algorithms and environments
    assert net_util.is_q_learning(algorithm)
    # Build model body
    dims = [self.in_dim] + self.hid_layers
    self.model_body = net_util.build_sequential(dims, self.hid_layers_activation)
    # output layers
    self.v = nn.Linear(dims[-1], 1)  # state value
    self.adv = nn.Linear(dims[-1], out_dim)  # action dependent raw advantage
    net_util.init_layers(self.modules())
    if torch.cuda.is_available() and self.gpu:
        for module in self.modules():
            module.cuda()
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
def __init__(self, net_spec, in_dim, out_dim):
    nn.Module.__init__(self)
    Net.__init__(self, net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        init_fn=None,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'shared',
        'hid_layers',
        'hid_layers_activation',
        'init_fn',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # Build model body
    dims = [self.in_dim] + self.hid_layers
    self.model_body = net_util.build_sequential(dims, self.hid_layers_activation)
    # output layers
    self.v = nn.Linear(dims[-1], 1)  # state value
    self.adv = nn.Linear(dims[-1], out_dim)  # action dependent raw advantage
    net_util.init_layers(self, self.init_fn)
    for module in self.modules():
        module.to(self.device)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_scheduler = net_util.get_lr_scheduler(self, self.lr_scheduler_spec)
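# Sketch (hedged: this follows the standard dueling aggregation of
# Wang et al. 2016, not necessarily this repo's exact forward method): combine
# the v and adv tails defined above into Q-value estimates.
import torch

def dueling_q_sketch(self, x):
    x = self.model_body(x)
    v = self.v(x)  # (batch, 1) state value
    adv = self.adv(x)  # (batch, out_dim) raw advantages
    # subtract the mean advantage for identifiability: Q = V + (A - mean(A))
    return v + adv - adv.mean(dim=-1, keepdim=True)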
def __init__(self, net_spec, algorithm, in_dim, out_dim):
    nn.Module.__init__(self)
    Net.__init__(self, net_spec, algorithm, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'lr_anneal_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    dims = [self.in_dim] + self.hid_layers
    self.model_body = net_util.build_sequential(dims, self.hid_layers_activation)
    # multi-tail output layer with mean and std
    self.model_tails = nn.ModuleList([nn.Linear(dims[-1], out_d) for out_d in out_dim])
    net_util.init_layers(self.modules())
    if torch.cuda.is_available() and self.gpu:
        for module in self.modules():
            module.cuda()
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
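# Illustrative only (hypothetical dims): for a Gaussian policy with
# action_dim = 2, passing out_dim = [2, 2] gives one tail for the mean and one
# for the raw std, each a Linear from the last hidden size, as built above.
import torch.nn as nn

hid, out_dim = 64, [2, 2]
model_tails = nn.ModuleList([nn.Linear(hid, d) for d in out_dim])
# -> ModuleList(Linear(64, 2), Linear(64, 2))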
def __init__(self, net_spec, in_dim, out_dim):
    state_dim, action_dim = in_dim
    nn.Module.__init__(self)
    Net.__init__(self, net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        out_layer_activation=None,
        init_fn=None,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'shared',
        'hid_layers',
        'hid_layers_activation',
        'out_layer_activation',
        'init_fn',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    dims = [state_dim + action_dim] + self.hid_layers
    self.model = net_util.build_fc_model(dims, self.hid_layers_activation)
    # add last layer with no activation
    self.model_tail = net_util.build_fc_model([dims[-1], self.out_dim], self.out_layer_activation)
    net_util.init_layers(self, self.init_fn)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.to(self.device)
    self.train()
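# Usage sketch (hypothetical shapes): this net consumes the concatenation of
# state and action, since its first layer has width state_dim + action_dim.
import torch

state = torch.randn(32, 4)  # batch of 32 states, state_dim = 4
action = torch.randn(32, 2)  # matching actions, action_dim = 2
x = torch.cat((state, action), dim=-1)  # (32, state_dim + action_dim)
# q = net.model_tail(net.model(x))  # -> (32, out_dim) Q(s, a) estimates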
def init_params(self):
    '''
    Initializes all of the model's parameters using xavier uniform initialization.
    Biases are all set to 0.01
    '''
    layers = self.conv_layers + self.flat_layers
    net_util.init_layers(layers, 'Linear')
    net_util.init_layers(layers, 'Conv')
    net_util.init_layers(layers, 'BatchNorm')
def __init__(self, net_spec, in_dim, out_dim):
    assert len(in_dim) == 3  # image shape (c,w,h)
    nn.Module.__init__(self)
    Net.__init__(self, net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        init_fn=None,
        normalize=False,
        batch_norm=False,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'conv_hid_layers',
        'fc_hid_layers',
        'hid_layers_activation',
        'init_fn',
        'normalize',
        'batch_norm',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # Guard against inappropriate algorithms and environments
    assert isinstance(out_dim, int)
    # conv body
    self.conv_model = self.build_conv_layers(self.conv_hid_layers)
    self.conv_out_dim = self.get_conv_output_size()
    # fc body
    if ps.is_empty(self.fc_hid_layers):
        tail_in_dim = self.conv_out_dim
    else:
        # fc layer from flattened conv
        self.fc_model = net_util.build_fc_model([self.conv_out_dim] + self.fc_hid_layers, self.hid_layers_activation)
        tail_in_dim = self.fc_hid_layers[-1]
    # tails. avoid list for single-tail for compute speed
    self.v = nn.Linear(tail_in_dim, 1)  # state value
    self.adv = nn.Linear(tail_in_dim, out_dim)  # action dependent raw advantage
    self.model_tails = nn.ModuleList([self.v, self.adv])
    net_util.init_layers(self, self.init_fn)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.to(self.device)
    self.train()
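# Sketch of the usual way a get_conv_output_size() helper is implemented (an
# assumption about this repo's version, shown for clarity): run a dummy image
# through the conv body and measure the flattened feature size.
import torch

def get_conv_output_size_sketch(self):
    with torch.no_grad():
        dummy = torch.ones(1, *self.in_dim)  # (1, c, w, h)
        out = self.conv_model(dummy)
        return int(out.numel())  # flattened conv feature size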
def __init__(self, net_spec, in_dim, out_dim):
    '''
    Multi state processing heads, single shared body, and multi action tails.
    There is one state and action head per body/environment
    Example:

           env 1 state       env 2 state
        _______|______      _______|______
       |    head 1    |    |    head 2    |
       |______________|    |______________|
               |                  |
               |__________________|
        ________________|_______________
       |          Shared body           |
       |________________________________|
                        |
                ________|_______
               |                |
        _______|______    ______|_______
       |    tail 1    |  |    tail 2    |
       |______________|  |______________|
               |                |
          env 1 action     env 2 action
    '''
    nn.Module.__init__(self)
    super().__init__(net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        out_layer_activation=None,
        init_fn=None,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'out_layer_activation',
        'init_fn',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    assert len(self.hid_layers) == 3, 'Your hidden layers must specify [*heads], [body], [*tails]. If not, use MLPNet'
    assert isinstance(self.in_dim, list), 'Hydra network needs in_dim as list'
    assert isinstance(self.out_dim, list), 'Hydra network needs out_dim as list'
    self.head_hid_layers = self.hid_layers[0]
    self.body_hid_layers = self.hid_layers[1]
    self.tail_hid_layers = self.hid_layers[2]
    if len(self.head_hid_layers) == 1:
        self.head_hid_layers = self.head_hid_layers * len(self.in_dim)
    if len(self.tail_hid_layers) == 1:
        self.tail_hid_layers = self.tail_hid_layers * len(self.out_dim)
    self.model_heads = self.build_model_heads(in_dim)
    heads_out_dim = np.sum([head_hid_layers[-1] for head_hid_layers in self.head_hid_layers])
    dims = [heads_out_dim] + self.body_hid_layers
    self.model_body = net_util.build_fc_model(dims, self.hid_layers_activation)
    self.model_tails = self.build_model_tails(self.out_dim, self.out_layer_activation)
    net_util.init_layers(self, self.init_fn)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.to(self.device)
    self.train()
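# Illustrative only (hypothetical sizes): a hid_layers spec for two
# environments, read as [*heads], [body], [*tails] per the assert above.
example_hydra_hid_layers = [
    [[32], [32]],  # heads: one per env, each ending in 32 units
    [64],          # shared body: consumes concat(32 + 32) = 64 head features
    [[16], [16]],  # tails: one per env, feeding each out_dim
]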
def __init__(self, net_spec, in_dim, out_dim):
    '''
    net_spec:
    conv_hid_layers: list containing dimensions of the convolutional hidden layers, each a list representing hid_layer = out_d, kernel, stride, padding, dilation.
        Assumed to all come before the flat layers.
        Note: a convolutional layer should specify the in_channels, out_channels, kernel_size, stride (of kernel steps), padding, and dilation (spacing between kernel points)
        e.g. [3, 16, (5, 5), 1, 0, (2, 2)]
        For more details, see http://pytorch.org/docs/master/nn.html#conv2d
        and https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    fc_hid_layers: list of fc layers following the convolutional layers
    hid_layers_activation: activation function for the hidden layers
    out_layer_activation: activation function for the output layer, same shape as out_dim
    init_fn: weight initialization function
    normalize: whether to divide by 255.0 to normalize image input
    batch_norm: whether to add batch normalization after each convolutional layer, excluding the input layer
    clip_grad_val: clip gradient norm if value is not None
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_scheduler_spec: PyTorch optim.lr_scheduler
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing
    '''
    assert len(in_dim) == 3  # image shape (c,w,h)
    nn.Module.__init__(self)
    super().__init__(net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        out_layer_activation=None,
        init_fn=None,
        normalize=False,
        batch_norm=True,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'conv_hid_layers',
        'fc_hid_layers',
        'hid_layers_activation',
        'out_layer_activation',
        'init_fn',
        'normalize',
        'batch_norm',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # conv body
    self.conv_model = self.build_conv_layers(self.conv_hid_layers)
    self.conv_out_dim = self.get_conv_output_size()
    # fc body
    if ps.is_empty(self.fc_hid_layers):
        tail_in_dim = self.conv_out_dim
    else:
        # fc body from flattened conv
        self.fc_model = net_util.build_fc_model([self.conv_out_dim] + self.fc_hid_layers, self.hid_layers_activation)
        tail_in_dim = self.fc_hid_layers[-1]
    # tails. avoid list for single-tail for compute speed
    if ps.is_integer(self.out_dim):
        self.model_tail = net_util.build_fc_model([tail_in_dim, self.out_dim], self.out_layer_activation)
    else:
        if not ps.is_list(self.out_layer_activation):
            self.out_layer_activation = [self.out_layer_activation] * len(out_dim)
        assert len(self.out_layer_activation) == len(self.out_dim)
        tails = []
        for out_d, out_activ in zip(self.out_dim, self.out_layer_activation):
            tail = net_util.build_fc_model([tail_in_dim, out_d], out_activ)
            tails.append(tail)
        self.model_tails = nn.ModuleList(tails)
    net_util.init_layers(self, self.init_fn)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.to(self.device)
    self.train()
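# Illustrative only: a conv_hid_layers value following the docstring's own
# bracketed example above; the exact fields consumed are determined by
# build_conv_layers, which is not shown here.
example_conv_hid_layers = [
    [3, 16, (5, 5), 1, 0, (2, 2)],  # the docstring's example layer
]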
def __init__(self, net_spec, algorithm, in_dim, out_dim):
    '''
    net_spec:
    hid_layers: list with a tuple consisting of two elements: (conv_hid, flat_hid)
        Note: the tuple must contain two elements; use an empty list if no such layers.
        1. conv_hid: list containing dimensions of the convolutional hidden layers. Assumed to all come before the flat layers.
            Note: a convolutional layer should specify the in_channels, out_channels, kernel_size, stride (of kernel steps), padding, and dilation (spacing between kernel points)
            e.g. [3, 16, (5, 5), 1, 0, (2, 2)]
            For more details, see http://pytorch.org/docs/master/nn.html#conv2d
            and https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
        2. flat_hid: list of dense layers following the convolutional layers
    hid_layers_activation: activation function for the hidden layers
    batch_norm: whether to add batch normalization after each convolutional layer, excluding the input layer
    clip_grad: whether to clip the gradient
    clip_grad_val: the clip value
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_decay: function to decay learning rate
    lr_decay_frequency: how many total timesteps per decay
    lr_decay_min_timestep: minimum amount of total timesteps before starting decay
    lr_anneal_timestep: timestep to anneal lr decay
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing
    '''
    # OpenAI gym provides images as W x H x C, PyTorch expects C x W x H
    in_dim = np.roll(in_dim, 1)
    # use generic multi-output for Convnet
    out_dim = np.reshape(out_dim, -1).tolist()
    nn.Module.__init__(self)
    super(ConvNet, self).__init__(net_spec, algorithm, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        batch_norm=True,
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'batch_norm',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'lr_anneal_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    self.conv_hid_layers = self.hid_layers[0]
    self.dense_hid_layers = self.hid_layers[1]
    # conv layer
    self.conv_model = self.build_conv_layers(self.conv_hid_layers)
    # fc layer from flattened conv
    self.dense_model = self.build_dense_layers(self.dense_hid_layers)
    # tails
    tail_in_dim = self.dense_hid_layers[-1] if len(self.dense_hid_layers) > 0 else self.conv_out_dim
    self.model_tails = nn.ModuleList([nn.Linear(tail_in_dim, out_d) for out_d in self.out_dim])
    net_util.init_layers(self.modules())
    if torch.cuda.is_available() and self.gpu:
        for module in self.modules():
            module.cuda()
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
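# Illustrative only (hypothetical flat size): the (conv_hid, flat_hid) pair
# described in the docstring above; use an empty list for an absent part.
example_hid_layers = [
    [[3, 16, (5, 5), 1, 0, (2, 2)]],  # conv_hid: one conv layer, per the docstring example
    [128],                            # flat_hid: one dense layer after the conv layers
]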
def __init__(self, net_spec, algorithm, in_dim, out_dim):
    '''
    net_spec:
    hid_layers: list containing dimensions of the hidden layers. The last element of the list should be the dimension of the hidden state for the recurrent layer. The other elements in the list are the dimensions of the MLP (if desired) which transforms the state space.
    hid_layers_activation: activation function for the state_proc hidden layers
    rnn_hidden_size: rnn hidden_size
    rnn_num_layers: number of recurrent layers
    seq_len: length of the state history passed to the net
    clip_grad: whether to clip the gradient
    clip_grad_val: the clip value
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_decay: function to decay learning rate
    lr_decay_frequency: how many total timesteps per decay
    lr_decay_min_timestep: minimum amount of total timesteps before starting decay
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing
    '''
    # use generic multi-output for RNN
    out_dim = np.reshape(out_dim, -1).tolist()
    nn.Module.__init__(self)
    super(RecurrentNet, self).__init__(net_spec, algorithm, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        rnn_num_layers=1,
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'rnn_hidden_size',
        'rnn_num_layers',
        'seq_len',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # state processing model
    state_proc_dims = [self.in_dim] + self.hid_layers
    self.state_proc_model = net_util.build_sequential(state_proc_dims, self.hid_layers_activation)
    # RNN model
    self.rnn_input_dim = state_proc_dims[-1]
    self.rnn_model = nn.GRU(
        input_size=self.rnn_input_dim,
        hidden_size=self.rnn_hidden_size,
        num_layers=self.rnn_num_layers,
        batch_first=True)
    # tails
    self.model_tails = nn.ModuleList([nn.Linear(self.rnn_hidden_size, out_d) for out_d in self.out_dim])
    net_util.init_layers(self.modules())
    if torch.cuda.is_available() and self.gpu:
        for module in self.modules():
            module.cuda()
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
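# Shape sketch (hypothetical numbers): with batch_first=True, the GRU built
# above expects input of shape (batch, seq_len, rnn_input_dim), and its last
# time-step output feeds each Linear tail.
import torch
import torch.nn as nn

rnn = nn.GRU(input_size=16, hidden_size=32, num_layers=1, batch_first=True)
x = torch.randn(8, 4, 16)  # batch=8, seq_len=4, processed-state dim=16
out, h_n = rnn(x)  # out: (8, 4, 32); h_n: (1, 8, 32)
last = out[:, -1, :]  # (8, 32) -> input to each Linear tail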
def __init__(self, net_spec, in_dim, out_dim):
    '''
    net_spec:
    cell_type: any of RNN, LSTM, GRU
    fc_hid_layers: list of fc layers preceding the RNN layers
    hid_layers_activation: activation function for the fc hidden layers
    out_layer_activation: activation function for the output layer, same shape as out_dim
    rnn_hidden_size: rnn hidden_size
    rnn_num_layers: number of recurrent layers
    bidirectional: whether the RNN should be bidirectional
    seq_len: length of the state history passed to the net
    init_fn: weight initialization function
    clip_grad_val: clip gradient norm if value is not None
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_scheduler_spec: PyTorch optim.lr_scheduler
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing
    '''
    nn.Module.__init__(self)
    super(RecurrentNet, self).__init__(net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        out_layer_activation=None,
        cell_type='GRU',
        rnn_num_layers=1,
        bidirectional=False,
        init_fn=None,
        clip_grad_val=None,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_scheduler_spec=None,
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'cell_type',
        'fc_hid_layers',
        'hid_layers_activation',
        'out_layer_activation',
        'rnn_hidden_size',
        'rnn_num_layers',
        'bidirectional',
        'seq_len',
        'init_fn',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_scheduler_spec',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # fc body: state processing model
    if ps.is_empty(self.fc_hid_layers):
        self.rnn_input_dim = self.in_dim
    else:
        fc_dims = [self.in_dim] + self.fc_hid_layers
        self.fc_model = net_util.build_fc_model(fc_dims, self.hid_layers_activation)
        self.rnn_input_dim = fc_dims[-1]
    # RNN model
    self.rnn_model = getattr(nn, net_util.get_nn_name(self.cell_type))(
        input_size=self.rnn_input_dim,
        hidden_size=self.rnn_hidden_size,
        num_layers=self.rnn_num_layers,
        batch_first=True,
        bidirectional=self.bidirectional)
    # tails. avoid list for single-tail for compute speed
    if ps.is_integer(self.out_dim):
        self.model_tail = net_util.build_fc_model([self.rnn_hidden_size, self.out_dim], self.out_layer_activation)
    else:
        if not ps.is_list(self.out_layer_activation):
            self.out_layer_activation = [self.out_layer_activation] * len(out_dim)
        assert len(self.out_layer_activation) == len(self.out_dim)
        tails = []
        for out_d, out_activ in zip(self.out_dim, self.out_layer_activation):
            tail = net_util.build_fc_model([self.rnn_hidden_size, out_d], out_activ)
            tails.append(tail)
        self.model_tails = nn.ModuleList(tails)
    net_util.init_layers(self, self.init_fn)
    for module in self.modules():
        module.to(self.device)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_scheduler = net_util.get_lr_scheduler(self, self.lr_scheduler_spec)
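# Illustrative only (hypothetical values): a net_spec for the RecurrentNet
# above, with keys mirroring its docstring.
example_rnn_net_spec = {
    'type': 'RecurrentNet',
    'cell_type': 'GRU',
    'fc_hid_layers': [64],
    'hid_layers_activation': 'relu',
    'rnn_hidden_size': 64,
    'rnn_num_layers': 1,
    'bidirectional': False,
    'seq_len': 4,
    'loss_spec': {'name': 'MSELoss'},
    'optim_spec': {'name': 'Adam', 'lr': 0.001},
}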
def __init__(self, net_spec, algorithm, in_dim, out_dim):
    '''
    Multi state processing heads, single shared body, and multi action tails.
    There is one state and action head per body/environment
    Example:

           env 1 state       env 2 state
        _______|______      _______|______
       |    head 1    |    |    head 2    |
       |______________|    |______________|
               |                  |
               |__________________|
        ________________|_______________
       |          Shared body           |
       |________________________________|
                        |
                ________|_______
               |                |
        _______|______    ______|_______
       |    tail 1    |  |    tail 2    |
       |______________|  |______________|
               |                |
          env 1 action     env 2 action
    '''
    nn.Module.__init__(self)
    super(HydraMLPNet, self).__init__(net_spec, algorithm, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    assert len(self.hid_layers) == 3, 'Your hidden layers must specify [*heads], [body], [*tails]. If not, use MLPHeterogenousTails'
    assert isinstance(self.in_dim, list), 'Hydra network needs in_dim as list'
    assert isinstance(self.out_dim, list), 'Hydra network needs out_dim as list'
    self.head_hid_layers = self.hid_layers[0]
    self.body_hid_layers = self.hid_layers[1]
    self.tail_hid_layers = self.hid_layers[2]
    if len(self.head_hid_layers) == 1:
        self.head_hid_layers = self.head_hid_layers * len(self.in_dim)
    if len(self.tail_hid_layers) == 1:
        self.tail_hid_layers = self.tail_hid_layers * len(self.out_dim)
    self.model_heads = self.build_model_heads(in_dim)
    heads_out_dim = np.sum([head_hid_layers[-1] for head_hid_layers in self.head_hid_layers])
    dims = [heads_out_dim] + self.body_hid_layers
    self.model_body = net_util.build_sequential(dims, self.hid_layers_activation)
    self.model_tails = self.build_model_tails(out_dim)
    net_util.init_layers(self.modules())
    if torch.cuda.is_available() and self.gpu:
        for module in self.modules():
            module.cuda()
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
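# Worked example (hypothetical sizes): when a single head (or tail) spec is
# given, the len == 1 branches above broadcast it across all environments.
head_hid_layers = [[32]]  # one head spec...
in_dim = [4, 6]  # ...but two environments
if len(head_hid_layers) == 1:
    head_hid_layers = head_hid_layers * len(in_dim)
# -> [[32], [32]]: one identical head spec per environment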
def __init__(self, net_spec, algorithm, in_dim, out_dim):
    '''
    net_spec:
    hid_layers: list containing dimensions of the hidden layers
    hid_layers_activation: activation function for the hidden layers
    clip_grad: whether to clip the gradient
    clip_grad_val: the clip value
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_decay: function to decay learning rate
    lr_decay_frequency: how many total timesteps per decay
    lr_decay_min_timestep: minimum amount of total timesteps before starting decay
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing

    e.g. net_spec
    "net": {
        "type": "MLPNet",
        "hid_layers": [32],
        "hid_layers_activation": "relu",
        "clip_grad": false,
        "clip_grad_val": 1.0,
        "loss_spec": {
            "name": "MSELoss"
        },
        "optim_spec": {
            "name": "Adam",
            "lr": 0.02
        },
        "lr_decay": "rate_decay",
        "lr_decay_frequency": 500,
        "lr_decay_min_timestep": 1000,
        "update_type": "replace",
        "update_frequency": 1,
        "polyak_coef": 0.9,
        "gpu": true
    }
    '''
    nn.Module.__init__(self)
    super(MLPNet, self).__init__(net_spec, algorithm, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    dims = [self.in_dim] + self.hid_layers
    self.model = net_util.build_sequential(dims, self.hid_layers_activation)
    # add last layer with no activation
    self.model.add_module(str(len(self.model)), nn.Linear(dims[-1], self.out_dim))
    net_util.init_layers(self.modules())
    if torch.cuda.is_available() and self.gpu:
        for module in self.modules():
            module.cuda()
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
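# Sketch of a polyak (soft) target update as selected by update_type (an
# assumption about net_util's implementation, shown for clarity):
# target <- coef * target + (1 - coef) * source, versus 'replace', which
# copies the source weights wholesale every update_frequency timesteps.
def polyak_update_sketch(src_net, tar_net, polyak_coef):
    for src_param, tar_param in zip(src_net.parameters(), tar_net.parameters()):
        tar_param.data.copy_(
            polyak_coef * tar_param.data + (1.0 - polyak_coef) * src_param.data)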
def __init__(self, net_spec, in_dim, out_dim):
    '''
    net_spec:
    hid_layers: list containing dimensions of the hidden layers
    hid_layers_activation: activation function for the hidden layers
    init_fn: weight initialization function
    clip_grad: whether to clip the gradient
    clip_grad_val: the clip value
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_decay: function to decay learning rate
    lr_decay_frequency: how many total timesteps per decay
    lr_decay_min_timestep: minimum amount of total timesteps before starting decay
    lr_anneal_timestep: timestep to anneal lr decay
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing
    '''
    nn.Module.__init__(self)
    super(MLPNet, self).__init__(net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        init_fn='xavier_uniform_',
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'separate',
        'hid_layers',
        'hid_layers_activation',
        'init_fn',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'lr_anneal_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    dims = [self.in_dim] + self.hid_layers
    self.model = net_util.build_sequential(dims, self.hid_layers_activation)
    # add last layer with no activation
    if ps.is_integer(self.out_dim):
        self.model.add_module(str(len(self.model)), nn.Linear(dims[-1], self.out_dim))
    else:
        # if more than 1 output, add last layer as tails separate from main model
        self.model_tails = nn.ModuleList([nn.Linear(dims[-1], out_d) for out_d in self.out_dim])
    net_util.init_layers(self, self.init_fn)
    for module in self.modules():
        module.to(self.device)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
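# Worked example (hypothetical dims) of the out_dim branch above: an integer
# out_dim appends one Linear to the body; a list grows separate tails.
# isinstance stands in here for ps.is_integer.
import torch.nn as nn

dims = [4, 64]
body = nn.Sequential(nn.Linear(4, 64), nn.ReLU())
out_dim = [2, 3]  # e.g. two output heads
if isinstance(out_dim, int):
    body.add_module(str(len(body)), nn.Linear(dims[-1], out_dim))
else:
    model_tails = nn.ModuleList([nn.Linear(dims[-1], d) for d in out_dim])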
def __init__(self, net_spec, in_dim, out_dim):
    '''
    net_spec:
    hid_layers: list with a tuple consisting of two elements: (conv_hid, flat_hid)
        Note: the tuple must contain two elements; use an empty list if no such layers.
        1. conv_hid: list containing dimensions of the convolutional hidden layers. Assumed to all come before the flat layers.
            Note: a convolutional layer should specify the in_channels, out_channels, kernel_size, stride (of kernel steps), padding, and dilation (spacing between kernel points)
            e.g. [3, 16, (5, 5), 1, 0, (2, 2)]
            For more details, see http://pytorch.org/docs/master/nn.html#conv2d
            and https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
        2. flat_hid: list of dense layers following the convolutional layers
    hid_layers_activation: activation function for the hidden layers
    init_fn: weight initialization function
    batch_norm: whether to add batch normalization after each convolutional layer, excluding the input layer
    clip_grad: whether to clip the gradient
    clip_grad_val: the clip value
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_decay: function to decay learning rate
    lr_decay_frequency: how many total timesteps per decay
    lr_decay_min_timestep: minimum amount of total timesteps before starting decay
    lr_anneal_timestep: timestep to anneal lr decay
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing
    '''
    # OpenAI gym provides images as W x H x C, PyTorch expects C x W x H
    in_dim = np.roll(in_dim, 1)
    # use generic multi-output for Convnet
    out_dim = np.reshape(out_dim, -1).tolist()
    nn.Module.__init__(self)
    super(ConvNet, self).__init__(net_spec, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        init_fn='xavier_uniform_',
        batch_norm=True,
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'init_fn',
        'batch_norm',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'lr_anneal_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    self.conv_hid_layers = self.hid_layers[0]
    self.dense_hid_layers = self.hid_layers[1]
    # conv layer
    self.conv_model = self.build_conv_layers(self.conv_hid_layers)
    # fc layer from flattened conv
    self.dense_model = self.build_dense_layers(self.dense_hid_layers)
    # tails
    tail_in_dim = self.dense_hid_layers[-1] if len(self.dense_hid_layers) > 0 else self.conv_out_dim
    self.model_tails = nn.ModuleList([nn.Linear(tail_in_dim, out_d) for out_d in self.out_dim])
    net_util.init_layers(self, self.init_fn)
    for module in self.modules():
        module.to(self.device)
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
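# Worked example (standard numpy behavior): np.roll(in_dim, 1) converts the
# gym image shape (W, H, C) into PyTorch's channel-first (C, W, H), as the
# comment above describes.
import numpy as np

in_dim = np.array([84, 84, 3])  # W x H x C from the environment
print(np.roll(in_dim, 1))  # -> [ 3 84 84], i.e. C x W x H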
def __init__(self, net_spec, algorithm, in_dim, out_dim):
    '''
    net_spec:
    hid_layers: list containing dimensions of the hidden layers. The last element of the list should be the dimension of the hidden state for the recurrent layer. The other elements in the list are the dimensions of the MLP (if desired) which transforms the state space.
    hid_layers_activation: activation function for the state_proc hidden layers
    rnn_hidden_size: rnn hidden_size
    rnn_num_layers: number of recurrent layers
    seq_len: length of the state history passed to the net
    clip_grad: whether to clip the gradient
    clip_grad_val: the clip value
    loss_spec: measure of error between model predictions and correct outputs
    optim_spec: parameters for initializing the optimizer
    lr_decay: function to decay learning rate
    lr_decay_frequency: how many total timesteps per decay
    lr_decay_min_timestep: minimum amount of total timesteps before starting decay
    lr_anneal_timestep: timestep to anneal lr decay
    update_type: method to update network weights: 'replace' or 'polyak'
    update_frequency: how many total timesteps per update
    polyak_coef: ratio of polyak weight update
    gpu: whether to train using a GPU. Note this will only work if a GPU is available; otherwise setting gpu=True does nothing
    '''
    # use generic multi-output for RNN
    out_dim = np.reshape(out_dim, -1).tolist()
    nn.Module.__init__(self)
    super(RecurrentNet, self).__init__(net_spec, algorithm, in_dim, out_dim)
    # set default
    util.set_attr(self, dict(
        rnn_num_layers=1,
        clip_grad=False,
        clip_grad_val=1.0,
        loss_spec={'name': 'MSELoss'},
        optim_spec={'name': 'Adam'},
        lr_decay='no_decay',
        update_type='replace',
        update_frequency=1,
        polyak_coef=0.0,
        gpu=False,
    ))
    util.set_attr(self, self.net_spec, [
        'hid_layers',
        'hid_layers_activation',
        'rnn_hidden_size',
        'rnn_num_layers',
        'seq_len',
        'clip_grad',
        'clip_grad_val',
        'loss_spec',
        'optim_spec',
        'lr_decay',
        'lr_decay_frequency',
        'lr_decay_min_timestep',
        'lr_anneal_timestep',
        'update_type',
        'update_frequency',
        'polyak_coef',
        'gpu',
    ])
    # state processing model
    state_proc_dims = [self.in_dim] + self.hid_layers
    self.state_proc_model = net_util.build_sequential(state_proc_dims, self.hid_layers_activation)
    # RNN model
    self.rnn_input_dim = state_proc_dims[-1]
    self.rnn_model = nn.GRU(
        input_size=self.rnn_input_dim,
        hidden_size=self.rnn_hidden_size,
        num_layers=self.rnn_num_layers,
        batch_first=True)
    # tails
    self.model_tails = nn.ModuleList([nn.Linear(self.rnn_hidden_size, out_d) for out_d in self.out_dim])
    net_util.init_layers(self.modules())
    if torch.cuda.is_available() and self.gpu:
        for module in self.modules():
            module.cuda()
    self.loss_fn = net_util.get_loss_fn(self, self.loss_spec)
    self.optim = net_util.get_optim(self, self.optim_spec)
    self.lr_decay = getattr(net_util, self.lr_decay)
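# Worked example (hypothetical sizes): hid_layers = [64, 32] makes the MLP
# state processor map in_dim -> 64 -> 32, and the last element (32) becomes
# the GRU input size, matching the wiring above.
in_dim = 4
hid_layers = [64, 32]
state_proc_dims = [in_dim] + hid_layers  # [4, 64, 32]
rnn_input_dim = state_proc_dims[-1]  # 32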