Example #1
import os
import torch
import numpy as np
import torch.optim as optim
from model import QNet
from utils.utils import *
from hparams import HyperParams as hp
from dqn_agent import train_model
from copy import deepcopy
from minecraft_env import env
from memory import Memory

if __name__ == "__main__":
    env = env.MinecraftEnv()
    env.init(allowContinuousMovement=["move", "turn"],
             videoResolution=[800, 600])
    env.seed(500)
    torch.manual_seed(500)
    render_map = False

    num_inputs = env.observation_space.shape
    num_actions = len(env.action_names[0])

    print('state size:', num_inputs)
    print('action size:', num_actions)

    model = QNet(num_actions)
    model.apply(weights_init)
    target_model = QNet(num_actions)
    update_target_model(model, target_model)
    model.train()
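
This snippet leans on two helpers from utils.utils that are not shown: weights_init and update_target_model. A minimal sketch of what they presumably do, assuming the usual DQN pattern (Xavier initialisation of the layers and a hard copy of the online network's weights into the target network):

import torch.nn as nn

def weights_init(m):
    # Assumed behaviour: Xavier-uniform initialisation of Linear/Conv layers.
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)

def update_target_model(model, target_model):
    # Assumed behaviour: hard update, copying every parameter of the online
    # network into the target network.
    target_model.load_state_dict(model.state_dict())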
Example #2
import torch
from model import QNet
from dqn_agent import train_model
from copy import deepcopy
from minecraft_env import env
from memory import Memory
import marlo
import gym


def stop_loop(event):
    event.stop()


if __name__ == "__main__":
    env = gym.make('MinecraftEating1-v0')
    env.init(
        allowDiscreteMovement=["move", "turn"],
        videoResolution=[800, 600],
    )
    # env = env.MinecraftEnv()
    # env.init(allowDiscreteMovement=None,
    #          videoResolution=[800, 600])
    env.seed(500)
    torch.manual_seed(500)
    render_map = False

    num_inputs = env.observation_space.shape
    num_actions = len(env.action_names[0])

    print('state size:', num_inputs)
    print('action size:', num_actions)

    model = QNet(num_actions)
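
The snippet ends right after constructing QNet. For context, epsilon-greedy action selection on top of such a network is typically implemented as in the sketch below; get_action is a hypothetical helper, and it assumes QNet maps a state tensor to one Q-value per action:

import random
import torch

def get_action(model, state, epsilon, num_actions):
    # With probability epsilon pick a random action, otherwise take the
    # action with the highest predicted Q-value.
    if random.random() < epsilon:
        return random.randrange(num_actions)
    with torch.no_grad():
        q_values = model(torch.FloatTensor(state).unsqueeze(0))
    return q_values.argmax(dim=1).item()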
Example #3
import torch
import numpy as np
from minecraft_env import env
from utils.utils import *
from darknet import Darknet
import cv2
import torch.nn as nn
import pickle as pkl
import pandas as pd

CUDA = torch.cuda.is_available()

env = env.MinecraftEnv()
env.init(
    allowContinuousMovement=["move", "turn"],
    continuous_discrete=False,
    videoResolution=[800, 600]
    )

num_classes = 80
confidence = 0.5
nms_thesh = 0.4
classes = load_classes('data/coco.names') 
print("Loading network.....")
model = Darknet("cfg/yolov3.cfg")
model.load_weights('save_model/yolov3.weights')
print("Network successfully loaded")

model.net_info["height"] = 416
inp_dim = int(model.net_info["height"])
assert inp_dim % 32 == 0 
assert inp_dim > 32
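
Frames from the environment have to be resized to inp_dim and converted to a CHW float tensor before they can be fed to the Darknet model. The original repo most likely uses its own letterboxing helper; prep_frame below is only an illustrative stand-in built on the cv2 and torch imports above:

def prep_frame(frame, inp_dim):
    # Illustrative stand-in: resize an RGB frame to (inp_dim, inp_dim),
    # reorder HWC -> CHW, scale to [0, 1] and add a batch dimension.
    resized = cv2.resize(frame, (inp_dim, inp_dim))
    tensor = torch.from_numpy(resized).float().permute(2, 0, 1) / 255.0
    return tensor.unsqueeze(0)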
Example #4

import os
import torch
import numpy as np
import torch.optim as optim
from model import QNet
from utils.utils import *
from hparams import HyperParams as hp
from dqn_agent import train_model
from copy import deepcopy
from minecraft_env import env
from memory import Memory

if __name__ == "__main__":
    env = env.MinecraftEnv()
    env.init(allowDiscreteMovement=None, videoResolution=[800, 600])
    env.seed(500)
    torch.manual_seed(500)
    render_map = False

    num_inputs = env.observation_space.shape
    num_actions = len(env.action_names[0])

    print('state size:', num_inputs)
    print('action size:', num_actions)

    model = QNet(num_actions)
    model.apply(weights_init)
    target_model = QNet(num_actions)
    update_target_model(model, target_model)
    model.train()
    target_model.train()
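
train_model, imported from dqn_agent in the examples above, is not shown and its exact signature is unknown. A generic sketch of the standard DQN update it presumably performs (sample a batch from the replay memory and regress the online Q-values toward the Bellman target computed with the target network):

def dqn_update(model, target_model, optimizer, batch, gamma=0.99):
    # Generic DQN step, not the project's actual train_model implementation.
    states, actions, rewards, next_states, dones = batch
    q_values = model(states).gather(1, actions.unsqueeze(1)).squeeze(1)
    with torch.no_grad():
        next_q = target_model(next_states).max(dim=1)[0]
        target = rewards + gamma * next_q * (1 - dones)
    loss = torch.nn.functional.mse_loss(q_values, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()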