Example #1
    def __init__(self, thymio_name):
        """init"""
        self.net = Net()
        self.net.load_state_dict(torch.load('./cnn', map_location='cpu'))
        self.net.eval()
        self.thymio_name = thymio_name
        self.transform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.ToTensor()
            ])
        rospy.init_node('basic_thymio_controller', anonymous=True)
        time.sleep(5)

        self.velocity_publisher = rospy.Publisher(self.thymio_name + '/cmd_vel',
                                                  Twist, queue_size=10)
        self.pose_subscriber = rospy.Subscriber(self.thymio_name + '/odom',
                                                Odometry, self.update_state)

        self.camera_subscriber = rospy.Subscriber(self.thymio_name + '/camera/image_raw',
                                                  Image, self.update_image, queue_size=1)

        self.current_pose = Pose()
        self.current_twist = Twist()
        self.rate = rospy.Rate(10)
Example #2
    def __init__(self, hidden_dim, buffer_size, gamma, batch_size, device, writer):
        self.env = make("connectx", debug=True)
        self.device = device
        self.policy = Net(self.env.configuration.columns * self.env.configuration.rows, hidden_dim,
                          self.env.configuration.columns).to(
            device)

        self.target = Net(self.env.configuration.columns * self.env.configuration.rows, hidden_dim,
                          self.env.configuration.columns).to(
            device)
        self.enemyNet = Net(self.env.configuration.columns * self.env.configuration.rows, hidden_dim,
                            self.env.configuration.columns).to(
            device)
        self.target.load_state_dict(self.policy.state_dict())
        self.target.eval()
        self.buffer = ExperienceReplay(buffer_size)
        self.enemy = "random"
        self.trainingPair = self.env.train([None, self.enemy])
        self.loss_function = nn.MSELoss()
        self.optimizer = optim.Adam(params=self.policy.parameters(), lr=0.001)
        self.gamma = gamma
        self.batch_size = batch_size

        self.first = True
        self.player = 1
        self.writer = writer
Example #3
def generate_experiment(method='FGSM'):

    # define your model and load pretrained weights
    # TODO
    # model = ...
    model = Net()
    # load_state_dict modifies the model in place; don't reassign its return value
    model.load_state_dict(
        torch.load("/content/drive/My Drive/Colab Notebooks/model64"))

    # cinic class names
    import yaml
    with open('./cinic_classnames.yml', 'r') as fp:
        classnames = yaml.safe_load(fp)

    # load image
    # TODO:
    # img_path = Path(....)
    img_path = Path("/content/test/")
    input_img = Image.open(img_path / "airplane/cifar10-test-10.png")

    # define normalizer and un-normalizer for images
    # cinic
    mean = [0.47889522, 0.47227842, 0.43047404]
    std = [0.24205776, 0.23828046, 0.25874835]

    tf_img = transforms.Compose([
        # transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])
    un_norm = transforms.Compose([
        transforms.Normalize(mean=[-m / s for m, s in zip(mean, std)],
                             std=[1 / s for s in std]),
        Clamp(),
        transforms.ToPILImage()
    ])

    # To be used for iterative method
    # to ensure staying within Linf limits
    clip_min = min([-m / s for m, s in zip(mean, std)])
    clip_max = max([(1 - m) / s for m, s in zip(mean, std)])

    input_tensor = tf_img(input_img)
    attacker = AdversialAttacker(method=method)

    return {
        'img': input_img,
        'inp': input_tensor.unsqueeze(0),
        'attacker': attacker,
        'mdl': model,
        'clip_min': clip_min,
        'clip_max': clip_max,
        'un_norm': un_norm,
        'classnames': classnames
    }
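# Usage sketch (not part of the original snippet): classify the clean image with
# the dictionary returned by generate_experiment(); assumes classnames can be
# indexed by the predicted class index.
exp = generate_experiment(method='FGSM')
with torch.no_grad():
    logits = exp['mdl'](exp['inp'])
pred = logits.argmax(dim=1).item()
print('predicted class index:', pred)
print('class name:', exp['classnames'][pred])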
Example #4
def loading_model(model_path=BEST_MODEL_SAVE_PATH):
    model = Net(resnet_level=24).to(DEVICE)
    try:
        print('===> Loading the saved model...')
        model.load_state_dict(torch.load(model_path, map_location=DEVICE))
        return model
    except FileNotFoundError:
        print('===> Loading the saved model failed, creating a new one...')
        return model
Example #5
class BasicThymio:

    def __init__(self, thymio_name):
        """init"""
        self.net = Net()
        self.net.load_state_dict(torch.load('./cnn', map_location='cpu'))
        self.net.eval()
        self.thymio_name = thymio_name
        self.transform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.ToTensor()
            ])
        rospy.init_node('basic_thymio_controller', anonymous=True)
        time.sleep(5)

        self.velocity_publisher = rospy.Publisher(self.thymio_name + '/cmd_vel',
                                                  Twist, queue_size=10)
        self.pose_subscriber = rospy.Subscriber(self.thymio_name + '/odom',
                                                Odometry, self.update_state)

        self.camera_subscriber = rospy.Subscriber(self.thymio_name + '/camera/image_raw',
                                                  Image, self.update_image, queue_size=1)

        self.current_pose = Pose()
        self.current_twist = Twist()
        self.rate = rospy.Rate(10)

    def thymio_state_service_request(self, position, orientation):
        """Request the service (set thymio state values) exposed by
        the simulated thymio. A teleportation tool, by default in gazebo world frame.
        Be aware, this does not mean a reset (e.g. odometry values)."""
        rospy.wait_for_service('/gazebo/set_model_state')
        try:
            model_state = ModelState()
            model_state.model_name = self.thymio_name
            model_state.reference_frame = ''  # the frame for the pose information
            model_state.pose.position.x = position[0]
            model_state.pose.position.y = position[1]
            model_state.pose.position.z = position[2]
            qto = quaternion_from_euler(
                orientation[0], orientation[1], orientation[2], axes='sxyz')
            model_state.pose.orientation.x = qto[0]
            model_state.pose.orientation.y = qto[1]
            model_state.pose.orientation.z = qto[2]
            model_state.pose.orientation.w = qto[3]
            # a Twist can also be set, but doing so in a service call is not recommended
            gms = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
            response = gms(model_state)
            return response
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
Example #6
def loading_model():
    if torch.cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')
    model = Net(resnet_level=24).to(DEVICE)
    try:
        print('===> Loading the saved model...')
        model.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location=DEVICE))
        return model
    except FileNotFoundError:
        print('===> Loading the saved model failed, creating a new one...')
        return model
Example #7
def init_para():
    start_time = time.perf_counter()
    try:
        print('===> Looking for the para_saved file')
        open(PARA_SAVE_PATH).close()
    except FileNotFoundError:
        print('===> The para_saved file does not exist, creating a new one...')
        train_dataset_list = txt_to_path_list(TRAIN_DATA_PATH)
        random.shuffle(train_dataset_list)
        torch.save({
            'epoch': EPOCH,
            'batch_counter': BATCH_COUNTER,
            'lr': LR,
            'optimizer param_groups': torch.optim.Adam(Net().to(DEVICE).parameters(),
                                                       lr=LR,
                                                       betas=(0.9, 0.999),
                                                       eps=1e-08,
                                                       amsgrad=True).state_dict()['param_groups'][0],
            'train_dataset_list': train_dataset_list,
            'loss_list': [],
            'result_list': [],
            'hard_cases_list': [],
            'best_result': [0, 0]
        }, PARA_SAVE_PATH)
        print('===> Done with initialization!')
    finally:
        print('===> Init_para used time: ', time.perf_counter() - start_time)
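# Sketch (assumption, not original code): the saved parameter file can be read
# back with torch.load, using the same PARA_SAVE_PATH and key names as above.
para = torch.load(PARA_SAVE_PATH)
epoch = para['epoch']
lr = para['lr']
train_dataset_list = para['train_dataset_list']
best_result = para['best_result']
print('===> Resuming at epoch', epoch, 'with lr', lr, 'and best result', best_result)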
Example #8
    def __call__(self, draw = False):
        
        # Load model
        net = Net()
        # Trained model
        net.load_state_dict(torch.load('./saved_models/model_checkpoint_kpd.pt')['model'])
        ## prepare the net for testing
        net.eval()
        
        ## Data preparation
        transformations = transforms.Compose([Rescale(250),
                                             RandomCrop(224),
                                             Normalize(),
                                             ToTensor()])

        # create the transformed dataset
        transformed_data = KeypointsIterableDataset(self.image, self.keypoints, transform=transformations)
        data_loader = DataLoader(transformed_data, num_workers=0)
        
        ## if the train flag is set, resume training from the saved checkpoint
        if self.train:
            print("Training...")
            ## Run each record twice for training.
            n_epochs = 2
            train_net(n_epochs, data_loader, net)
        
        ## Get the prediction
        print("Predicting...")
        test_images, test_pts, gt_pts, sample = test_net(data_loader, net)
    
        if draw:
            visualize_output(test_images, test_pts)
        # Rescaled.
        return InverseTransform()(sample)
Example #9
    def __init__(self, model_file="baseline1.pth"):
        # You should 
        #       1. create the model object
        #       2. load your state_dict
        #       3. call cuda()

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = Net()
        self.mdl = NetObj()
        self.modelObj = self.mdl.model
        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            self.model = nn.DataParallel(self.model)
            self.modelObj = nn.DataParallel(self.modelObj)
        checkpoint = torch.load(model_file, map_location=device)
        self.state_dict_1 = checkpoint['modelRoadMap_state_dict']
        self.state_dict_2 = checkpoint['modelObjectDetection_state_dict']
        self.model.load_state_dict(self.state_dict_1)
        self.modelObj.load_state_dict(self.state_dict_2)
        self.model.eval()
        self.modelObj.eval()
        self.model.to(device)
        self.modelObj.to(device)
Example #10
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from Model import Net
from modelobj import NetObj
import numpy as np


model = Net()
mdl = NetObj()
modelObj = mdl.model

checkpoint = torch.load("modelobj/fasterrcnn_model_19.pth")
checkpoint1 = torch.load("model1/resnet18_model2_20.pth")
state_dict_1 = checkpoint1['modelRoadMap_state_dict']
state_dict_2 = checkpoint['modelObjectDetection_state_dict']
model.load_state_dict(state_dict_1)
modelObj.load_state_dict(state_dict_2)
PATH = "baseline1.pth"
torch.save({'modelRoadMap_state_dict': state_dict_1, 'modelObjectDetection_state_dict': state_dict_2}, PATH)


Example #11
                jj = int(w0*j/w)
                newimg[i][j]=float(img[k][ii][jj])/n
    return newimg
    '''

def image_debug(env, h=100, w=100):
    img = (get_image(env) * 255).astype('uint8')
    img = Image.fromarray(img)
    img.show()
                

envlist = []
envlist.append(gym.make('CartPole-v0'))
#envlist.append(gym.make('MountainCar-v0'))

model = Net()
model_freezed = copy.deepcopy(model)
print(model)
print(model_freezed)
done = False
tot_reward = 0
env = envlist[0]

eps_s = 1
eps_e = 0.1
play_time = 2000000
display_step = 200

gamma = 0.9
mb_size = 16
frame_skip = 2
Example #12
def main():
    global opt, model
    opt = parser.parse_args()
    print(opt)

    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")

    opt.seed = random.randint(1, 10000)
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    cudnn.benchmark = True

    print("===> Loading datasets")
    train_set = DatasetFromHdf5("data/train.h5")
    training_data_loader = DataLoader(dataset=train_set,
                                      num_workers=opt.threads,
                                      batch_size=opt.batchSize,
                                      shuffle=True)

    print("===> Building model")
    model = Net()
    criterion = nn.MSELoss(reduction='sum')  # equivalent to the deprecated size_average=False

    print("===> Setting GPU")
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    # optionally resume from a checkpoint
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            opt.start_epoch = checkpoint["epoch"] + 1
            model.load_state_dict(checkpoint["model"].state_dict())
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))

    # optionally copy weights from a checkpoint
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading model '{}'".format(opt.pretrained))
            weights = torch.load(opt.pretrained)
            model.load_state_dict(weights['model'].state_dict())
        else:
            print("=> no model found at '{}'".format(opt.pretrained))

    print("===> Setting Optimizer")
    optimizer = optim.SGD(model.parameters(),
                          lr=opt.lr,
                          momentum=opt.momentum,
                          weight_decay=opt.weight_decay)

    print("===> Training")
    for epoch in range(opt.start_epoch, opt.nEpochs + 1):
        train(training_data_loader, optimizer, model, criterion, epoch)
        save_checkpoint(model, epoch)
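# save_checkpoint() is not shown in this snippet. A minimal sketch consistent with
# the resume branch above (which reads checkpoint["epoch"] and
# checkpoint["model"].state_dict()); the "checkpoint/" output directory is an assumption.
def save_checkpoint(model, epoch):
    model_out_path = "checkpoint/model_epoch_{}.pth".format(epoch)
    if not os.path.exists("checkpoint/"):
        os.makedirs("checkpoint/")
    torch.save({"epoch": epoch, "model": model}, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))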
Example #13
class Trainer:
    def __init__(self, hidden_dim, buffer_size, gamma, batch_size, device, writer):
        self.env = make("connectx", debug=True)
        self.device = device
        self.policy = Net(self.env.configuration.columns * self.env.configuration.rows, hidden_dim,
                          self.env.configuration.columns).to(
            device)

        self.target = Net(self.env.configuration.columns * self.env.configuration.rows, hidden_dim,
                          self.env.configuration.columns).to(
            device)
        self.enemyNet = Net(self.env.configuration.columns * self.env.configuration.rows, hidden_dim,
                            self.env.configuration.columns).to(
            device)
        self.target.load_state_dict(self.policy.state_dict())
        self.target.eval()
        self.buffer = ExperienceReplay(buffer_size)
        self.enemy = "random"
        self.trainingPair = self.env.train([None, self.enemy])
        self.loss_function = nn.MSELoss()
        self.optimizer = optim.Adam(params=self.policy.parameters(), lr=0.001)
        self.gamma = gamma
        self.batch_size = batch_size

        self.first = True
        self.player = 1
        self.writer = writer

    def agent(self, observation, configuration):
        with torch.no_grad():
            state = torch.tensor(observation['board'], dtype=torch.float)
            reshaped = self.reshape(state)
            action = self.takeAction(self.enemyNet(reshaped).view(-1), reshaped, 0, False)
            return action

    def switch(self):
        self.trainingPair = self.env.train([None, "negamax"])
        self.enemy = "negamax"

    def switchPosition(self):
        self.env.reset()
        if self.first:
            self.trainingPair = self.env.train([self.enemy, None])
            self.player = 2
        else:
            self.trainingPair = self.env.train([None, self.enemy])
            self.player = 1
        self.first = not self.first

    def load(self, path):
        self.policy.load_state_dict(torch.load(path))

    def synchronize(self):
        self.target.load_state_dict(self.policy.state_dict())

    def save(self, name):
        torch.save(self.policy.state_dict(), name)

    def reset(self):
        self.env.reset()
        return self.trainingPair.reset()

    def step(self, action):
        return self.trainingPair.step(action)

    def addExperience(self, experience):
        self.buffer.append(experience)

    def epsilon(self, maxE, minE, episode, lastEpisode):
        return (maxE - minE) * max((lastEpisode - episode) / lastEpisode, 0) + minE

    def change_reward(self, reward, done):
        if done and reward == 1:
            return 10
        if done and reward == -1:
            return -10
        if reward is None and done:
            return -20
        if done:
            return 1
        if reward == 0:
            return 1 / 42
        else:
            return reward

    def change_reward_streak(self, reward, done, reshapedBoard, action, useStreak):
        if done and reward == 1:
            return 20
        if done and reward == -1:
            return -20
        if reward is None and done:
            return -40
        if done:
            return 1
        if reward == 0 and useStreak:
            return 1 / 42 + self.streakReward(self.player, reshapedBoard, action)
        if reward == 0:
            return 1 / 42
        else:
            return reward

    def streakReward(self, player, reshapedBoard, action):
        verticalReward = 0
        horizontalReward = 0
        if self.longestVerticalStreak(player, reshapedBoard, action) == 3:
            verticalReward = 3
        if self.longestHorizontalStreak(player, reshapedBoard, action) == 3:
            horizontalReward = 3
        return verticalReward + horizontalReward + self.longestDiagonalStreak(player, reshapedBoard, action)

    def longestVerticalStreak(self, player, reshapedBoard, action):
        count = 0
        wasZero = False
        for i in range(5, 0, -1):
            if reshapedBoard[0][player][i][action] == 0:
                wasZero = True
            if reshapedBoard[0][player][i][action] == 1 and wasZero:
                count = 0
                wasZero = False
            count += reshapedBoard[0][player][i][action]
        if reshapedBoard[0][0][0][action] == 0:
            return 0
        return count

    def longestHorizontalStreak(self, player, reshapedBoard, action):
        count = 0
        rowOfAction = self.rowOfAction(player, reshapedBoard, action)
        wasZero = False
        for i in range(7):
            if reshapedBoard[0][player][rowOfAction][i] == 0:
                wasZero = True
            if reshapedBoard[0][player][rowOfAction][i] == 1 and wasZero:
                count = 0
                wasZero = False
            count += reshapedBoard[0][player][rowOfAction][i]
        return count

    def longestDiagonalStreak(self, player, reshapedBoard, action):
        rowOfAction = self.rowOfAction(player, reshapedBoard, action)
        for row in range(4):
            for col in range(5):
                if reshapedBoard[0][player][row][col] == reshapedBoard[0][player][row + 1][col + 1] == \
                        reshapedBoard[0][player][row + 2][col + 2] == 1 and self.actionInDiagonal1(action, row, col,
                                                                                                   rowOfAction):
                    return 3
        for row in range(5, 1, -1):
            for col in range(4):
                if reshapedBoard[0][player][row][col] == reshapedBoard[0][player][row - 1][col + 1] == \
                        reshapedBoard[0][player][row - 2][col + 2] == 1 and self.actionInDiagonal2(action, row, col,
                                                                                                   rowOfAction):
                    return 3
        return 0

    def actionInDiagonal1(self, action, row, col, rowOfAction):
        return (rowOfAction == row and action == col or
                rowOfAction == row + 1 and action == col + 1 or
                rowOfAction == row + 2 and action == col + 2)

    def actionInDiagonal2(self, action, row, col, rowOfAction):
        return (rowOfAction == row and action == col or
                rowOfAction == row - 1 and action == col + 1 or
                rowOfAction == row - 2 and action == col + 2)

    def rowOfAction(self, player, reshapedBoard, action):
        rowOfAction = 10
        for i in range(6):
            if reshapedBoard[0][player][i][action] == 1:
                rowOfAction = min(i, rowOfAction)
        return rowOfAction

    def policyAction(self, board, episode, lastEpisode, minEp=0.1, maxEp=0.9):
        reshaped = self.reshape(torch.tensor(board))
        output = self.policy(reshaped).view(-1)
        return self.takeAction(output, reshaped, self.epsilon(maxEp, minEp, episode, lastEpisode))

    def takeAction(self, actionList: torch.tensor, board, epsilon, train=True):
        if (np.random.random() < epsilon) and train:
            # explore: only sample from the columns that are still open (invalid actions are excluded)
            # return torch.tensor(np.random.choice(len(actionList))).item()
            return np.random.choice([i for i in range(len(actionList)) if board[0][0][0][i] == 1])
        else:
            for i in range(7):
                if board[0][0][0][i] == 0:
                    actionList[i] = float('-inf')
            return torch.argmax(actionList).item()

    def reshape(self, board: torch.tensor, unsqz=True):
        tensor = board.view(-1, 7).long()
        # channel [0] = cells where a piece can be dropped (1 where open), [1] = player 1 (0 where open), [2] = player 2 (0 where open)
        a = F.one_hot(tensor, 3).permute([2, 0, 1])
        b = a[:, :, :]
        if unsqz:
            return torch.unsqueeze(b, 0).float().to(self.device)
        return b.float().to(self.device)

    def preprocessState(self, state):
        state = self.reshape(torch.tensor(state), True)
        return state

    def trainActionFromPolicy(self, state, action):
        state = self.preprocessState(state)
        value = self.policy(state).view(-1).to(self.device)
        return value[action].to(self.device)

    def trainActionFromTarget(self, next_state, reward, done):
        next_state = self.preprocessState(next_state)
        target = self.target(next_state)
        target = torch.max(target, 1)[0].item()
        target = reward + ((self.gamma * target) * (1 - done))
        return torch.tensor(target).to(self.device)

    def train(self):
        if len(self.buffer) > self.batch_size:
            self.optimizer.zero_grad()
            states, actions, rewards, next_states, dones = self.buffer.sample(self.batch_size, self.device)
            meanLoss = 0
            for i in range(self.batch_size):
                value = self.trainActionFromPolicy(states[i], actions[i])
                target = self.trainActionFromTarget(next_states[i], rewards[i], dones[i])
                loss = self.loss_function(value, target)
                loss.backward()
                meanLoss += loss
            self.optimizer.step()
            return meanLoss / self.batch_size
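# The outer loop that drives Trainer is not included in this snippet. The sketch
# below is one plausible way to wire the methods together; the constructor
# arguments, episode count, sync interval, and experience-tuple layout are all
# assumptions.
trainer = Trainer(hidden_dim=128, buffer_size=10000, gamma=0.9,
                  batch_size=64, device="cpu", writer=None)
episodes = 1000
for episode in range(episodes):
    observation = trainer.reset()
    done = False
    while not done:
        board = observation['board']
        action = trainer.policyAction(board, episode, episodes)
        observation, reward, done, _ = trainer.step(action)
        reward = trainer.change_reward(reward, done)
        # assumed tuple layout: (state, action, reward, next_state, done)
        trainer.addExperience((board, action, reward, observation['board'], done))
        trainer.train()
    if episode % 10 == 0:
        trainer.synchronize()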
Example #14
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)

train_load = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         sampler=train_sampler)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net = Net()
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.7)

# Training
for epoch in range(5):
    for i, data in enumerate(train_load, 0):
        inputs, labels, name = data
        inputs = inputs.float()
        inputs, labels = inputs.to(device), labels.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
Example #15
    if type(m) == nn.Conv2d:
        nn.init.xavier_uniform_(m.weight.data,
                                gain=nn.init.calculate_gain('conv2d'))


"""
Initialize network and relevant variables
"""
root_dir = r'data/'

num_epoch = 2

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Using device:', device)

net = Net().to(device)
net.apply(init_weights)
"""
Load training data
"""
from torch.utils.data import DataLoader
from torchvision import transforms

composed = transforms.Compose([Normalize(), ToTensor()])

trainset = ImagesDataset(csv_file=root_dir + 'train.csv',
                         root_dir=root_dir,
                         transform=composed)
trainloader = DataLoader(trainset, batch_size=4, shuffle=True, num_workers=4)
"""
Train data
"""
Example #16
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

valid_sampler = SubsetRandomSampler(val_indices)

test_load = torch.utils.data.DataLoader(dataset,
                                        batch_size=batch_size,
                                        sampler=valid_sampler)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net = Net()
net.load_state_dict(torch.load("./cnn", map_location='cpu'))
net.eval()
net.to(device)

class_correct = list(0. for i in range(3))
class_total = list(0. for i in range(3))
with torch.no_grad():
    for i, data in enumerate(test_load):
        print("%d/%d" % (i, len(test_load)), end="\r", flush=True)
        images, labels, name = data
        images, labels = images.float().to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        label = labels.item()
Example #17
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import time

from Model import Net
model = Net(10)  #10 classes

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

if torch.cuda.is_available():
    model = model.cuda()
    criterion = criterion.cuda()

#from dataset import MeshData
#DataObject = MeshData(path)
st0 = time.time()
DataObject_X = np.load('x10.npy', allow_pickle=True)
DataObject_Y = np.load('y10.npy', allow_pickle=True)
st1 = time.time()
print(st1 - st0)

max_epochs = 30
for epochs in range(max_epochs):
    #print('e =', epochs)
    running_loss = 0.0
    st2 = time.time()
    for i in range(len(DataObject_X)):
Example #18
valLoader = torch.utils.data.DataLoader(validation_trainset, batch_size=args.batch_size, shuffle=True, num_workers=2, drop_last=True, collate_fn=collate_fn)


# In[ ]:


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# In[ ]:


# import your model.

from Model import Net
model = Net()
print(model)
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)

if args.reloadModel:
    model_fp = "model4/resnet18_model2_17.pth"
    model.load_state_dict(torch.load(model_fp)['modelRoadMap_state_dict'])
    print("model_loaded")


model.to(device)


# In[ ]:
Example #19
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from Model import Net

# 'device' is used in the training loop below, so define it here and move the model onto it
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = Net(10).to(device)  # 10 classes

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

from dataset import MeshData

DataObject = MeshData(
    '/home/prathmesh/Desktop/SoC-2020/ModelNet10_stl/ModelNet10')
dataLoad = torch.utils.data.DataLoader(DataObject, batch_size=1, shuffle=True)
batch = next(iter(dataLoad))
print(len(batch))

max_epochs = 30
loss_list = []
for epochs in range(max_epochs):
    #print('e =',epochs)
    running_loss = 0.0
    for i, data in enumerate(dataLoad, 0):
        x, y = data
        x = x[0].float().to(device)
        #y = y.float()
        y = y[0].to(device)
        optimizer.zero_grad()
Example #20
import cv2
import torch
from Model import Net
import torchvision.transforms as transforms

#=========Setting up==============
#=======Loading weight file to the model========
from Dataset import load_split_train_test

classes = [
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
    'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'del', 'nothing',
    'space'
]

model = Net(out_fea=len(classes))

PATH = 'weight/epoch_6loss_0.15457870066165924.pt'
model.load_state_dict(torch.load(PATH))
model = model.eval()

img = cv2.imread(
    'P:/Hand-Symbol-Recognition/asl_alphabet_test/asl_alphabet_test/J_test.jpg'
)
trans = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
img = trans(img)
img = img.unsqueeze(0)
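# The snippet ends before the forward pass; a plausible continuation (assumption):
with torch.no_grad():
    out = model(img)
pred = out.argmax(dim=1).item()
print('predicted symbol:', classes[pred])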
Example #21
                                   shuffle=False)
    test_loader = data.DataLoader(test_dataset,
                                  batch_size=BATCH_SIZE,
                                  shuffle=False)

    loss_total = []
    acc_total = []
    pred_total = []
    true_total = []

    for i in tqdm(range(TOTAL)):
        image_shape = full_dataset.x_data.shape[1:]

        device = torch.device(CUDA_N if torch.cuda.is_available() else 'cpu')
        torch.manual_seed(SEED[i])
        net = Net(image_shape, NUM_CLASS)
        net.to(device)
        print(net)

        softmax = nn.Softmax(dim=1)
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.1)

        loss_list = []
        train_acc_list = []
        test_acc_list = []

        pred_temp = []
        true_temp = []

        for epoch in range(EPOCH):
Example #22
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()


dataiter = iter(train_loader)
images, labels = next(dataiter)

imshow(torchvision.utils.make_grid(images))
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

#=============Defining Training Parameters and type of Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model = Net(out_fea=len(classes))
model = model.train()
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

step = 0
loss_train = []
loss_val = []

min_loss = 100
patience = 5
training_loss_store = []
validation_loss_store = []

writer = SummaryWriter('writer')
Example #23
class ModelLoader():
    # Fill the information for your team
    team_name = 'LAG'
    team_member = ["Sree Gowri Addepalli"," Amartya prasad", "Sree Lakshmi Addepalli"]
    round_number = 1
    contact_email = '*****@*****.**'

    def __init__(self, model_file="baseline1.pth"):
        # You should 
        #       1. create the model object
        #       2. load your state_dict
        #       3. call cuda()

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = Net()
        self.mdl = NetObj()
        self.modelObj = self.mdl.model
        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            self.model = nn.DataParallel(self.model)
            self.modelObj = nn.DataParallel(self.modelObj)
        checkpoint = torch.load(model_file, map_location=device)
        self.state_dict_1 = checkpoint['modelRoadMap_state_dict']
        self.state_dict_2 = checkpoint['modelObjectDetection_state_dict']
        self.model.load_state_dict(self.state_dict_1)
        self.modelObj.load_state_dict(self.state_dict_2)
        self.model.eval()
        self.modelObj.eval()
        self.model.to(device)
        self.modelObj.to(device)
        

    def get_bounding_boxes(self,samples):
        # samples is a cuda tensor with size [batch_size, 6, 3, 256, 306]
        # You need to return a tuple with size 'batch_size' and each element is a cuda tensor [N, 2, 4]
        # where N is the number of objects

        batch_size = list(samples.shape)[0]
        # Convert it into [batch_size, 3, 512, 918]
        img_tensor = self.combine_images(samples,batch_size)
        tup_boxes = []
        with torch.no_grad():
            for img in img_tensor:
              prediction = self.modelObj([img.cuda()])
              cbox = self.convertBoundingBoxes(prediction[0]['boxes'])
              #print(cbox.shape)
              tup_boxes.append(cbox)
        return tuple(tup_boxes)

    def get_binary_road_map(self,samples):
        # samples is a cuda tensor with size [batch_size, 6, 3, 256, 306]
        # You need to return a cuda tensor with size [batch_size, 800, 800]
        with torch.no_grad(): 
            batch_size = list(samples.shape)[0]
            sample = samples.reshape(batch_size,18,256,306)
            output = self.model(sample)
            #print(output.shape)
            output = output.reshape(batch_size, 800, 800)
            return output


    def combine_images(self, samples, batch_size):
        # samples is a cuda tensor with size [batch_size, 6, 3, 256, 306]
        # Stitch the six camera views into one image per sample and return
        # a float tensor with size [batch_size, 3, 512, 918]
        ss = samples.reshape(batch_size, 2, 3, 3, 256, 306)
        t = ss.detach().cpu().clone().numpy().transpose(0, 3, 2, 1, 4, 5)
        # MergingImage
        tp = np.zeros((batch_size, 3, 3, 512, 306))
        for i in range(0, batch_size):
            for j in range(0, 3):
                for k in range(0, 3):
                    tp[i][j][k] = np.vstack([t[i][j][k][0], t[i][j][k][1]])
        tr = np.zeros((batch_size, 3, 512, 918))
        for i in range(0, batch_size):
            for j in range(0, 3):
                tr[i][j] = np.hstack([tp[i][j][0], tp[i][j][1], tp[i][j][2]])
        image_tensor = torch.from_numpy(tr).float()
        return image_tensor

    def convertBoundingBoxes(self, boxes):
        # convert [N,1,4] to [N,2,4]
        if len(boxes) == 0:
            boxes = [[0,0,0,0]]
        convBoxes = []
        for box in boxes:
            xmin = box[0]
            xmin = (xmin - 400)/10
            ymin = box[1]
            ymin = (-ymin +400)/10
            xmax = box[2]
            xmax = (xmax - 400)/10
            ymax = box[3]
            ymax = (-ymax + 400)/10
            cbox = [[xmin,xmin,xmax,xmax], [ymin,ymax,ymin,ymax]]
            convBoxes.append(cbox)
        convBoxes = torch.Tensor(convBoxes)
        return convBoxes
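# Illustration only (made-up values): convertBoundingBoxes shifts each pixel
# coordinate by 400 and divides by 10, producing the [2, 4] corner format.
box = [410.0, 390.0, 450.0, 380.0]                         # [xmin, ymin, xmax, ymax]
xmin, xmax = (box[0] - 400) / 10, (box[2] - 400) / 10      # -> 1.0, 5.0
ymin, ymax = (-box[1] + 400) / 10, (-box[3] + 400) / 10    # -> 1.0, 2.0
cbox = [[xmin, xmin, xmax, xmax], [ymin, ymax, ymin, ymax]]
# cbox == [[1.0, 1.0, 5.0, 5.0], [1.0, 2.0, 1.0, 2.0]]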
Example #24
        print(preprcess.TF_IDF_dict)
        preprcess.wordbagGenerate()
        preprcess.Save()

    print('wordbag: ')
    print(preprcess.wordbag)
    print('label_list: ')
    print(preprcess.label_list)

    x = np.array(preprcess.wordbag)
    y = np.array(preprcess.label_list)
    x = torch.from_numpy(x).type(torch.FloatTensor)
    y = torch.from_numpy(y)
    x, y = Variable(x), Variable(y)

    net = Net()

    optimizer = torch.optim.ASGD(net.parameters(), lr=0.002)
    criterion = torch.nn.CrossEntropyLoss()

    for t in range(10000):
        out = net(x)
        loss = criterion(out, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if t % 100 == 0:
            prediction = torch.max(F.softmax(out, dim=1), 1)[1]
            pred_y = prediction.data.numpy().squeeze()
            target_y = y.data.numpy()
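# The snippet cuts off here; a plausible continuation inside the "if t % 100 == 0"
# block (assumption) would report the training accuracy:
accuracy = float((pred_y == target_y).sum()) / target_y.size
print('step %d | loss %.4f | accuracy %.3f' % (t, loss.item(), accuracy))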
Example #25
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
from Model import Net

net = Net()

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

trainset = torchvision.datasets.CIFAR10(root='./data',
                                        train=True,
                                        download=True,
                                        transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=4,
                                          shuffle=True,
                                          num_workers=2)

testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=4,
                                         shuffle=False,
                                         num_workers=2)
Example #26
import os
import csv
import torch
import torch.optim as optim
import torch.nn as nn
from Model import Net
from DataLoader import DataLoader
from sklearn.model_selection import train_test_split

PATH = os.path.join("./modified_kaggle_HW2")

net = Net()

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net.to(device)
net = nn.DataParallel(net)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
data_gen = DataLoader(PATH)
X_train_load, y_train_load, X_test = data_gen.load(preprocess=True)


def batch_feeder(X_train, y_train, batch_size):
    X_mini = []
    y_mini = []
    for i in range(X_train.shape[0] // batch_size):
        X_mini.append(X_train[i * batch_size:(i + 1) * batch_size])
        y_mini.append(y_train[i * batch_size:(i + 1) * batch_size])
    print(len(X_mini))