Example #1
        def __init__(self, nn_params):
            super(ImitativePolicy, self).__init__()

            # Store the parameters:
            self.hidden_w = nn_params['hid_width']
            self.depth = nn_params['hid_depth']

            self.n_in_input = nn_params['dx']
            self.n_out = nn_params['du']

            self.activation = nn_params['activation']
            self.d = nn_params['dropout']

            self.loss_fnc = nn.MSELoss()

            # Build the feedforward stack from the network parameters
            layers = []
            layers.append(nn.Linear(self.n_in_input,
                                    self.hidden_w))  # input layer
            layers.append(self.activation)
            layers.append(nn.Dropout(p=self.d))
            for d in range(self.depth):
                # hidden layers
                layers.append(nn.Linear(self.hidden_w, self.hidden_w))
                layers.append(self.activation)
                layers.append(nn.Dropout(p=self.d))

            # output layer
            layers.append(nn.Linear(self.hidden_w, self.n_out))
            self.features = nn.Sequential(*layers)

            # Scalers for the network inputs and outputs:
            # the policy takes a state and outputs an action (motor PWMs)
            self.scalarX = MinMaxScaler(feature_range=(-1, 1))
            self.scalarU = MinMaxScaler(feature_range=(-1, 1))
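
A minimal usage sketch for this constructor. It assumes the enclosing ImitativePolicy class and the usual imports (torch.nn as nn, sklearn's MinMaxScaler); every value in the dictionary below is illustrative, not taken from the original.

# Hypothetical parameter dictionary; the keys match those read in __init__ above.
nn_params = {
    'hid_width': 250,        # hidden-layer width (illustrative)
    'hid_depth': 2,          # number of hidden layers
    'dx': 12,                # state dimension (assumed)
    'du': 4,                 # action dimension, e.g. four motor PWMs (assumed)
    'activation': nn.ReLU(),
    'dropout': 0.1,
}
policy = ImitativePolicy(nn_params)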
Example #2
    def __init__(self, nn_params):
        """
        Simpler implementation of my other neural net class. After parameter
        tuning, the structure is kept fixed and changed only when needed; note
        that only the data actually used is passed into this network.
        """
        super(GeneralNN, self).__init__()
        # Store the parameters:
        self.prob = nn_params['bayesian_flag']
        self.hidden_w = nn_params['hid_width']
        self.depth = nn_params['hid_depth']

        self.n_in_input = nn_params['du']
        self.n_in_state = nn_params['dx']
        self.n_in = self.n_in_input + self.n_in_state
        self.n_out = nn_params['dt']

        self.activation = nn_params['activation']
        self.d = nn_params['dropout']
        self.split_flag = nn_params['split_flag']

        self.E = 0      # clarify that these models are not ensembles

        # Stored so a helper function can recover, when re-loading, which states/inputs the model was trained on
        self.state_list = []
        self.input_list = []
        self.change_state_list = []

        # Scalers for states, inputs, and state changes (MinMaxScaler,
        # StandardScaler, and RobustScaler variants were tried; these are kept):
        self.scalarX = StandardScaler()
        self.scalarU = MinMaxScaler(feature_range=(-1, 1))
        self.scalardX = MinMaxScaler(feature_range=(-1, 1))

        # Sets loss function
        if self.prob:
            # INIT max/minlogvar if PNN
            self.max_logvar = torch.nn.Parameter(
                torch.tensor(1 * np.ones([1, self.n_out]),
                             dtype=torch.float,
                             requires_grad=True))
            self.min_logvar = torch.nn.Parameter(
                torch.tensor(-1 * np.ones([1, self.n_out]),
                             dtype=torch.float,
                             requires_grad=True))
            self.loss_fnc = PNNLoss_Gaussian()
            self.n_out *= 2
        else:
            self.loss_fnc = nn.MSELoss()

        # If using split model, initiate here:
        if self.split_flag:
            self.features = nn.Sequential(
                SplitModel(self.n_in, self.n_out,
                    prob = self.prob,
                    width = self.hidden_w,
                    activation = self.activation,
                    dropout = self.d))
        else:
            # create object nicely
            layers = []
            layers.append(('dynm_input_lin', nn.Linear(
                self.n_in, self.hidden_w)))       # input layer
            layers.append(('dynm_input_act', self.activation))
            # layers.append(nn.Dropout(p=self.d))
            for d in range(self.depth):
                # hidden layers
                layers.append(
                    ('dynm_lin_'+str(d), nn.Linear(self.hidden_w, self.hidden_w)))
                layers.append(('dynm_act_'+str(d), self.activation))
                # layers.append(nn.Dropout(p=self.d))

            # output layer
            layers.append(('dynm_out_lin', nn.Linear(self.hidden_w, self.n_out)))
            self.features = nn.Sequential(OrderedDict(layers))
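
A hedged instantiation sketch for GeneralNN as defined above; all values are illustrative, and the project-specific pieces (PNNLoss_Gaussian, SplitModel, the sklearn scalers, OrderedDict, numpy, torch.nn as nn) are assumed to be imported as in the original code.

# Hypothetical parameter dictionary; the keys mirror those read in __init__ above.
nn_params = {
    'bayesian_flag': True,   # use the probabilistic (PNN) loss
    'hid_width': 250,        # hidden-layer width (illustrative)
    'hid_depth': 2,
    'du': 4,                 # control-input dimension (assumed)
    'dx': 12,                # state dimension (assumed)
    'dt': 9,                 # predicted state-change dimension (assumed)
    'activation': nn.ReLU(),
    'dropout': 0.0,
    'split_flag': False,     # plain sequential model instead of SplitModel
}
model = GeneralNN(nn_params)
# With bayesian_flag=True the output width is doubled: the network predicts a
# mean and a log-variance for each of the 'dt' target dimensions.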
Example #3
# DataCSV_All, RNN, number_classes, sequence_length, batch_size, and device are
# assumed to be defined earlier in the original script; the training-set path
# below is likewise an assumption (only the test set was loaded in the excerpt).
temp_data_train = DataCSV_All('data/tempAMAL_train.csv', number_classes,
                              sequence_length)
temp_data_test = DataCSV_All('data/tempAMAL_test.csv', number_classes,
                             sequence_length)
data = DataLoader(temp_data_train,
                  batch_size=batch_size,
                  shuffle=True,
                  drop_last=True)
data_test = DataLoader(temp_data_test,
                       batch_size=1,
                       shuffle=False,
                       drop_last=True)

data_sizes = temp_data_train.data.size()
latent_size = 10

model = RNN(1, latent_size, 1)
loss = nn.MSELoss()
optim = torch.optim.Adam(model.parameters(), lr=10**-3)

iterations = 1000

# Move the model and loss to the device (GPU)
model.to(device)
loss.to(device)

writer = SummaryWriter()

for i in range(iterations):

    train_loss = 0
    nt = 0
    for x in data:
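        # --- Assumed continuation: the original excerpt is truncated here. ---
        # A typical body moves the batch to the device, predicts each sequence's
        # next value from its prefix, and takes an optimizer step. The exact
        # shapes produced by DataCSV_All are not shown above, so the split
        # below is an illustration only.
        x = x.to(device)
        inputs, targets = x[:, :-1], x[:, 1:]   # assumed next-step targets
        optim.zero_grad()
        preds = model(inputs)
        l = loss(preds, targets)
        l.backward()
        optim.step()
        train_loss += l.item()
        nt += 1

    # Log the mean training loss for this pass (assumed tag name).
    writer.add_scalar('train_loss', train_loss / nt, i)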
Example #4
    def __init__(self, nn_params):

        super(GeneralNN, self).__init__()
        # Store the parameters:
        self.prob = nn_params['bayesian_flag']
        self.hidden_w = nn_params['hid_width']
        self.depth = nn_params['hid_depth']

        self.n_in_input = nn_params['du']
        self.n_in_state = nn_params['dx']
        self.n_in = self.n_in_input + self.n_in_state
        self.n_out = nn_params['dt']

        self.activation = nn_params['activation']
        self.d = nn_params['dropout']
        self.split_flag = nn_params['split_flag']

        self.epsilon = nn_params['epsilon']
        self.E = 0

        # Stored so a helper function can recover, when re-loading, which states/inputs the model was trained on
        self.state_list = []
        self.input_list = []
        self.change_state_list = []

        # Scalers for states, inputs, and state changes (MinMaxScaler,
        # StandardScaler, and RobustScaler variants were tried; these are kept):
        self.scalarX = StandardScaler()
        self.scalarU = MinMaxScaler(feature_range=(-1, 1))
        self.scalardX = MinMaxScaler(feature_range=(-1, 1))

        # Sets loss function
        if self.prob:
            # INIT max/minlogvar if PNN
            self.max_logvar = torch.nn.Parameter(
                torch.tensor(1 * np.ones([1, self.n_out]),
                             dtype=torch.float,
                             requires_grad=True))
            self.min_logvar = torch.nn.Parameter(
                torch.tensor(-1 * np.ones([1, self.n_out]),
                             dtype=torch.float,
                             requires_grad=True))
            self.loss_fnc = PNNLoss_Gaussian()
            self.n_out *= 2
        else:
            self.loss_fnc = nn.MSELoss()

        layers = []
        layers.append(
            ('dynm_input_lin', nn.Linear(self.n_in,
                                         self.hidden_w)))  # input layer
        layers.append(('dynm_input_act', self.activation))
        layers.append(('dynm_input_dropout', nn.Dropout(p=self.d)))
        for d in range(self.depth):
            layers.append(
                ('dynm_lin_' + str(d), nn.Linear(self.hidden_w,
                                                 self.hidden_w)))
            layers.append(('dynm_act_' + str(d), self.activation))
            layers.append(('dynm_dropout_' + str(d), nn.Dropout(p=self.d)))

        layers.append(('dynm_out_lin', nn.Linear(self.hidden_w, self.n_out)))
        self.features = nn.Sequential(OrderedDict(layers))
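
Before training, the three scalers stored in the constructor are typically fit on the raw training data and then used to normalize the network's inputs and targets. A minimal sketch, assuming model is a GeneralNN instance and that X, U, and dX are NumPy arrays of states, control inputs, and state changes with shapes (N, dx), (N, du), and (N, dt); the array names and the input ordering are assumptions, not from the original.

import numpy as np

# Hypothetical preprocessing step: fit the scalers and build training pairs.
normX = model.scalarX.fit_transform(X)      # states: zero mean, unit variance
normU = model.scalarU.fit_transform(U)      # inputs scaled to [-1, 1]
normdX = model.scalardX.fit_transform(dX)   # state changes scaled to [-1, 1]

inputs = np.hstack((normX, normU))          # [state, action] ordering assumed
targets = normdX                            # network target: state change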