Example #1
    def __init__(self, hidden_dim, buffer_size, gamma, batch_size, device, writer):
        self.env = make("connectx", debug=True)
        self.device = device
        board_size = self.env.configuration.columns * self.env.configuration.rows
        # Policy network (trained online), target network (a periodically
        # synced copy used for stable bootstrap targets), and a third copy
        # that drives the opponent agent.
        self.policy = Net(board_size, hidden_dim, self.env.configuration.columns).to(device)
        self.target = Net(board_size, hidden_dim, self.env.configuration.columns).to(device)
        self.enemyNet = Net(board_size, hidden_dim, self.env.configuration.columns).to(device)
        self.target.load_state_dict(self.policy.state_dict())
        self.target.eval()
        self.buffer = ExperienceReplay(buffer_size)
        self.enemy = "random"
        self.trainingPair = self.env.train([None, self.enemy])
        self.loss_function = nn.MSELoss()
        self.optimizer = optim.Adam(params=self.policy.parameters(), lr=0.001)
        self.gamma = gamma
        self.batch_size = batch_size

        self.first = True
        self.player = 1
        self.writer = writer
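
The excerpt shows only the constructor; a minimal sketch of the DQN update these attributes set up, assuming ExperienceReplay.sample returns batched state/action/reward/next-state/done tensors (its API is not shown in the excerpt):

    def optimize(self):
        # Sample a minibatch from the replay buffer (assumed API).
        states, actions, rewards, next_states, dones = self.buffer.sample(self.batch_size)
        # Q-values of the actions actually taken.
        q = self.policy(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        with torch.no_grad():
            # Bootstrap from the frozen target network.
            q_next = self.target(next_states).max(1)[0]
            target = rewards + self.gamma * q_next * (1 - dones)
        loss = self.loss_function(q, target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()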
Example #2
    def __init__(self, thymio_name):
        """init"""
        self.net = Net()
        self.net.load_state_dict(torch.load('./cnn', 'cpu'))
        self.net.eval()
        self.thymio_name = thymio_name
        self.transform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.ToTensor()
            ])
        rospy.init_node('basic_thymio_controller', anonymous=True)
        time.sleep(5)  # give the node time to register with the ROS master

        self.velocity_publisher = rospy.Publisher(self.thymio_name + '/cmd_vel',
                                                  Twist, queue_size=10)
        self.pose_subscriber = rospy.Subscriber(self.thymio_name + '/odom',
                                                Odometry, self.update_state)

        self.camera_subscriber = rospy.Subscriber(self.thymio_name + '/camera/image_raw',
                                                  Image, self.update_image, queue_size=1)

        self.current_pose = Pose()
        self.current_twist = Twist()
        self.rate = rospy.Rate(10)
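
The update_state and update_image callbacks registered above are not part of the excerpt; a minimal sketch of update_image, assuming cv_bridge converts the ROS Image message before the transform pipeline and the frozen net run on it:

    def update_image(self, msg):
        from cv_bridge import CvBridge
        # Convert the ROS image to a numpy array, run it through the
        # transform pipeline, and query the CNN (inference only).
        frame = CvBridge().imgmsg_to_cv2(msg, desired_encoding='rgb8')
        tensor = self.transform(frame).unsqueeze(0)
        with torch.no_grad():
            prediction = self.net(tensor)
        self.last_prediction = prediction  # assumed attribute, not in the excerpt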
Example #3
def init_para():
    start_time = time.perf_counter()
    try:
        # Probe for an existing parameter file; FileNotFoundError means
        # one has to be created from scratch.
        with open(PARA_SAVE_PATH):
            print('===> Found the para_saved file')
    except FileNotFoundError:
        print('===> The para_saved file does not exist, creating a new one...')
        train_dataset_list = txt_to_path_list(TRAIN_DATA_PATH)
        random.shuffle(train_dataset_list)
        torch.save({
            'epoch': EPOCH,
            'batch_counter': BATCH_COUNTER,
            'lr': LR,
            'optimizer param_groups': torch.optim.Adam(Net().to(DEVICE).parameters(),
                                                       lr=LR,
                                                       betas=(0.9, 0.999),
                                                       eps=1e-08,
                                                       amsgrad=True).state_dict()['param_groups'][0],
            'train_dataset_list': train_dataset_list,
            'loss_list': [],
            'result_list': [],
            'hard_cases_list': [],
            'best_result': [0, 0]
        }, PARA_SAVE_PATH)
        print('===> Done with initialization!')
    finally:
        print('===> Init_para used time: ', time.perf_counter() - start_time)
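
Once the file exists, the saved dictionary is read back with torch.load; a minimal sketch:

para = torch.load(PARA_SAVE_PATH)
epoch = para['epoch']
train_dataset_list = para['train_dataset_list']
best_result = para['best_result']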
Example #4
    def __call__(self, draw = False):
        
        # Load the model architecture and the trained weights, then put
        # the net in evaluation mode for prediction.
        net = Net()
        net.load_state_dict(torch.load('./saved_models/model_checkpoint_kpd.pt')['model'])
        net.eval()
        
        ## Data preparation
        transformations = transforms.Compose([Rescale(250),
                                             RandomCrop(224),
                                             Normalize(),
                                             ToTensor()])

        # create the transformed dataset
        transformed_data = KeypointsIterableDataset(self.image, self.keypoints, transform=transformations)
        data_loader = DataLoader(transformed_data, num_workers=0)
        
        ## If the train flag is set, resume training from the old checkpoint.
        if self.train:
            print("Training...")
            ## Run each record twice for training.
            n_epochs = 2
            train_net(n_epochs, data_loader, net)
        
        ## Get the prediction
        print("Predicting...")
        test_images, test_pts, gt_pts, sample = test_net(data_loader, net)
    
        if draw:
            visualize_output(test_images, test_pts)
        # Rescale the sample back to the original image coordinates.
        return InverseTransform()(sample)
Example #5
def generate_experiment(method='FGSM'):

    # define your model and load pretrained weights
    # TODO
    # model = ...
    model = Net()
    # load_state_dict returns a key-matching report, not the model, so call
    # it in place instead of rebinding `model`.
    model.load_state_dict(
        torch.load("/content/drive/My Drive/Colab Notebooks/model64"))
    model.eval()

    # cinic class names
    import yaml
    with open('./cinic_classnames.yml', 'r') as fp:
        classnames = yaml.safe_load(fp)

    # load image
    # TODO:
    # img_path = Path(....)
    img_path = Path("/content/test/")
    input_img = Image.open(img_path / "airplane/cifar10-test-10.png")

    # define normalizer and un-normalizer for images
    # cinic
    mean = [0.47889522, 0.47227842, 0.43047404]
    std = [0.24205776, 0.23828046, 0.25874835]

    tf_img = transforms.Compose([
        # transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])
    un_norm = transforms.Compose([
        transforms.Normalize(mean=[-m / s for m, s in zip(mean, std)],
                             std=[1 / s for s in std]),
        Clamp(),
        transforms.ToPILImage()
    ])

    # To be used for iterative method
    # to ensure staying within Linf limits
    clip_min = min([-m / s for m, s in zip(mean, std)])
    clip_max = max([(1 - m) / s for m, s in zip(mean, std)])

    input_tensor = tf_img(input_img)
    attacker = AdversialAttacker(method=method)

    return {
        'img': input_img,
        'inp': input_tensor.unsqueeze(0),
        'attacker': attacker,
        'mdl': model,
        'clip_min': clip_min,
        'clip_max': clip_max,
        'un_norm': un_norm,
        'classnames': classnames
    }
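
The internals of AdversialAttacker are not shown; for reference, a minimal FGSM step consistent with the clip_min/clip_max bounds computed above (a sketch, not the project's implementation):

import torch
import torch.nn.functional as F

def fgsm_step(model, inp, target, eps, clip_min, clip_max):
    # Perturb along the sign of the input gradient, then clamp back
    # into the valid normalized-pixel range.
    inp = inp.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(inp), target)
    loss.backward()
    adv = inp + eps * inp.grad.sign()
    return adv.clamp(clip_min, clip_max).detach()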
Example #6
def loading_model(model_path=BEST_MODEL_SAVE_PATH):
    model = Net(resnet_level=24).to(DEVICE)
    try:
        print('===> Loading the saved model...')
        model.load_state_dict(torch.load(model_path, map_location=DEVICE))
    except FileNotFoundError:
        print('===> Loading the saved model failed, creating a new one...')
    return model
Example #7
def loading_model():
    if torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')
    model = Net(resnet_level=24).to(DEVICE)
    try:
        print('===> Loading the saved model...')
        model.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location=DEVICE))
    except FileNotFoundError:
        print('===> Loading the saved model failed, creating a new one...')
    return model
Example #8
    def __init__(self, model_file="baseline1.pth"):
        # You should 
        #       1. create the model object
        #       2. load your state_dict
        #       3. call cuda()

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = Net()
        self.mdl = NetObj()
        self.modelObj = self.mdl.model
        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            self.model = nn.DataParallel(self.model)
            self.modelObj = nn.DataParallel(self.modelObj)
        # Use the model_file argument and map the checkpoint to the
        # available device.
        checkpoint = torch.load(model_file, map_location=device)
        self.state_dict_1 = checkpoint['modelRoadMap_state_dict']
        self.state_dict_2 = checkpoint['modelObjectDetection_state_dict']
        self.model.load_state_dict(self.state_dict_1)
        self.modelObj.load_state_dict(self.state_dict_2)
        self.model.eval()
        self.modelObj.eval()
        self.model.to(device)
        self.modelObj.to(device)
Example #9
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from Model import Net

model = Net(10)  # 10 classes
# `device` is used in the loop below but was missing from the excerpt.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

from dataset import MeshData

DataObject = MeshData(
    '/home/prathmesh/Desktop/SoC-2020/ModelNet10_stl/ModelNet10')
dataLoad = torch.utils.data.DataLoader(DataObject, batch_size=1, shuffle=True)
batch = next(iter(dataLoad))
print(len(batch))

max_epochs = 30
loss_list = []
for epochs in range(max_epochs):
    #print('e =',epochs)
    running_loss = 0.0
    for i, data in enumerate(dataLoad, 0):
        x, y = data
        x = x[0].float().to(device)
        #y = y.float()
        y = y[0].to(device)
        optimizer.zero_grad()
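        # Hedged completion of the truncated excerpt: a standard
        # forward/backward step (assumes Net returns class logits with a
        # batch dimension suitable for CrossEntropyLoss).
        out = model(x)
        loss = criterion(out, y)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()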
Example #10
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from Model import Net
from modelobj import NetObj
import numpy as np


model = Net()
mdl = NetObj()
modelObj = mdl.model

checkpoint = torch.load("modelobj/fasterrcnn_model_19.pth")
checkpoint1 = torch.load("model1/resnet18_model2_20.pth")
state_dict_1 = checkpoint1['modelRoadMap_state_dict']
state_dict_2 = checkpoint['modelObjectDetection_state_dict']
model.load_state_dict(state_dict_1)
modelObj.load_state_dict(state_dict_2)
PATH = "baseline1.pth"
torch.save({'modelRoadMap_state_dict': state_dict_1,
            'modelObjectDetection_state_dict': state_dict_2}, PATH)


Example #11
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)

train_load = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         sampler=train_sampler)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net = Net()
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.7)

# Training
for epoch in range(5):
    for i, data in enumerate(train_load, 0):
        inputs, labels, name = data
        inputs = inputs.float()
        inputs, labels = inputs.to(device), labels.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
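        # Hedged completion of the truncated excerpt: the forward/backward/step
        # sequence the comment above refers to.
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()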
Example #12
    # The opening of this excerpt is truncated; a train loader parallel to
    # the test loader below is assumed.
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=BATCH_SIZE,
                                   shuffle=False)
    test_loader = data.DataLoader(test_dataset,
                                  batch_size=BATCH_SIZE,
                                  shuffle=False)

    loss_total = []
    acc_total = []
    pred_total = []
    true_total = []

    for i in tqdm(range(TOTAL)):
        image_shape = full_dataset.x_data.shape[1:]

        device = torch.device(CUDA_N if torch.cuda.is_available() else 'cpu')
        torch.manual_seed(SEED[i])
        net = Net(image_shape, NUM_CLASS)
        net.to(device)
        print(net)

        softmax = nn.Softmax(dim=1)  # explicit dim avoids the implicit-dim warning
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.1)

        loss_list = []
        train_acc_list = []
        test_acc_list = []

        pred_temp = []
        true_temp = []

        for epoch in range(EPOCH):
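            # Hedged sketch of the truncated epoch body: one training pass,
            # using the loader and accumulator names set up above.
            net.train()
            for x_batch, y_batch in train_loader:
                x_batch, y_batch = x_batch.to(device), y_batch.to(device)
                optimizer.zero_grad()
                loss = criterion(net(x_batch), y_batch)
                loss.backward()
                optimizer.step()
                loss_list.append(loss.item())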
Example #13
def imshow(img):
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()


dataiter = iter(train_loader)
images, labels = next(dataiter)  # `.next()` was removed; use the builtin

imshow(torchvision.utils.make_grid(images))
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

#=============Defining Training Parameters and type of Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = Net(out_fea=len(classes))
model.to(device)
model.train()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

step = 0
loss_train = []
loss_val = []

min_loss = 100
patience = 5
training_loss_store = []
validation_loss_store = []

writer = SummaryWriter('writer')
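
min_loss and patience suggest validation-loss early stopping, but the loop itself is not in the excerpt. A minimal sketch of that logic (the helper and its state dict are assumptions):

def should_stop(val_loss, state, patience=5):
    # state = {'min_loss': ..., 'bad_epochs': ...}; returns True once the
    # validation loss has not improved for `patience` consecutive checks.
    if val_loss < state['min_loss']:
        state['min_loss'] = val_loss
        state['bad_epochs'] = 0
        return False
    state['bad_epochs'] += 1
    return state['bad_epochs'] >= patience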
Example #14
def init_weights(m):
    if type(m) == nn.Conv2d:
        nn.init.xavier_uniform_(m.weight.data,
                                gain=nn.init.calculate_gain('conv2d'))


"""
Initialize network and relevant variables
"""
root_dir = r'data/'

num_epoch = 2

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Using device:', device)

net = Net().to(device)
net.apply(init_weights)
"""
Load training data
"""
from torch.utils.data import DataLoader
from torchvision import transforms

composed = transforms.Compose([Normalize(), ToTensor()])

trainset = ImagesDataset(csv_file=root_dir + 'train.csv',
                         root_dir=root_dir,
                         transform=composed)
trainloader = DataLoader(trainset, batch_size=4, shuffle=True, num_workers=4)
"""
Train data
Example #15
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/train.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(reduction='sum')  # size_average=False is deprecated

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)