Example #1

import torch
from neural_net import NeuralNet
from load_policy import *  # provides get_batch() used below
import gym
import torch.nn as nn
import torch.optim as optim

network = NeuralNet(4, 128, 64, 2)  # presumably: 4 inputs, hidden sizes 128 and 64, 2 actions
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(network.parameters(), lr=0.01, momentum=0.9)

losses = []
iters = []
for i in range(20):
    state, action = get_batch()
    for j in range(state.size(0)):
        optimizer.zero_grad()
        # per-sample update; CrossEntropyLoss accepts unbatched input in recent PyTorch
        output = network(state[j])
        loss = criterion(output, action[j])
        if i % 10 == 0:
            losses.append(loss.item())  # store a float, not the graph-holding tensor
            iters.append(i)
        loss.backward()
        optimizer.step()
    print(loss.item())

torch.save(network.state_dict(),"mymodel.pt")
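
# Reloading the saved policy, a minimal sketch (assumes the network is rebuilt
# with the same layer sizes used above; "reloaded" is an illustrative name):
reloaded = NeuralNet(4, 128, 64, 2)
reloaded.load_state_dict(torch.load("mymodel.pt"))
reloaded.eval()  # switch to inference mode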

Example #2

import ast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from neural_net import NeuralNet

number_of_epochs = 100

# CUDA for PyTorch
use_cuda = torch.cuda.is_available()  # check whether a GPU is available
if use_cuda:
    print(torch.cuda.get_device_name())  # only valid when CUDA is present
device = torch.device("cuda" if use_cuda else "cpu")  # fall back to CPU otherwise

neural_net = NeuralNet()
neural_net.to(device)  # move the model to the selected device
neural_net.train()

optimizer = optim.SGD(neural_net.parameters(), lr=0.01, momentum=0.5)
loss_function = nn.MSELoss()

# read the dataset once up front: a file handle is exhausted after one pass,
# so each epoch iterates over the cached lines instead
with open("dataset.txt", "r") as fp:
    dataset = fp.readlines()

for i in range(number_of_epochs):
    for line in dataset:
        data_per_episode = ast.literal_eval(line.strip())
        print("total number of steps {}".format(len(data_per_episode)))
        for step in data_per_episode:
            data = step[0]
            data.append(step[1])

            gt_action = np.asarray(step[2])
            data = np.asarray(data)
            optimizer.zero_grad()
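            # The original snippet is truncated here; a minimal sketch of the
            # rest of the training step, assuming NeuralNet maps the state
            # array to a prediction compatible with MSELoss:
            state = torch.from_numpy(data).float().to(device)
            target = torch.from_numpy(gt_action).float().to(device)
            loss = loss_function(neural_net(state), target)
            loss.backward()
            optimizer.step()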

Example #3

# image_size, number_classes, the num_*/list_param_* network arguments,
# optimizer_choice and arg1..arg4 are assumed to be supplied by the
# enclosing code; the imports and device setup below are added so the
# fragment is self-contained
import torch
import torch.optim as optim
from neural_net import NeuralNet

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
assert image_size is not None, 'Image size cannot be None'
assert number_classes is not None, 'Total number of classes cannot be None'

num_input_channels = image_size[0]

print('> Constructing the network')
# construct the network
cnn = NeuralNet(num_conv_layers, num_full_layers, list_param_conv_layers,
                list_param_full_layers, dropout_rate, activation,
                image_size[1], number_classes, num_input_channels)

cnn.to(device)

try:
    if optimizer_choice == 1:
        optimizer = optim.SGD(cnn.parameters(),
                              lr=arg1,
                              momentum=arg2,
                              weight_decay=arg3,
                              dampening=arg4)
    elif optimizer_choice == 2:
        optimizer = optim.Adam(cnn.parameters(),
                               lr=arg1,
                               betas=(arg2, arg3),
                               weight_decay=arg4)
    elif optimizer_choice == 3:
        optimizer = optim.Adagrad(cnn.parameters(),
                                  lr=arg1,
                                  lr_decay=arg2,
                                  weight_decay=arg4,
                                  initial_accumulator_value=arg3)
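    else:
        # not in the original fragment: fail loudly on an unrecognized choice
        raise ValueError('unknown optimizer_choice: {}'.format(optimizer_choice))
# the fragment is truncated before its matching except clause; a minimal,
# assumed completion so the try statement parses:
except TypeError as err:
    print('> Could not construct the optimizer: {}'.format(err))
    raise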