Example #1
def main(config):
    MODEL_NAME = config['name']
    BATCH_SIZE = int(config['batch_size'])
    DEVICE = int(config['device'])
    EPOCHS = int(config['epochs'])
    LR = float(config['lr'])
    WORKERS = int(config['num_workers'])

    model = get_model(MODEL_NAME)
    train_ds = ProteinDataset(train_names, TRAIN)
    val_ds = ProteinDataset(val_names, TRAIN, val_aug)
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    trainer = Trainer(myloss, mymetric, optimizer, MODEL_NAME, model, None,
                      DEVICE)

    train_loader = torch.utils.data.DataLoader(train_ds,
                                               batch_size=BATCH_SIZE,
                                               shuffle=True,
                                               num_workers=WORKERS)
    val_loader = torch.utils.data.DataLoader(val_ds,
                                             batch_size=BATCH_SIZE,
                                             num_workers=WORKERS)

    model.to(DEVICE)

    # run the full training schedule, validating after every epoch
    for epoch in range(EPOCHS):
        trainer.train(train_loader)
        trainer.validate(val_loader)
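
A minimal sketch of how this entry point might be driven; the keys are exactly the ones main() reads above, while the file name and values are hypothetical:

import json

with open('configs/train.json') as f:   # hypothetical config file
    config = json.load(f)
# e.g. {"name": "resnet34", "batch_size": 32, "device": 0,
#       "epochs": 30, "lr": 1e-3, "num_workers": 4}

main(config)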
Example #2
from pretrainedmodels import resnet152, resnet18
from torch.utils.data import DataLoader
from torchvision.transforms import *
from training.training import Trainer
from generic_utils.metrics import mse
from generic_utils.output_watchers import RegressionWatcher
from pretrainedmodels.models import resnet50
from dataset.dataset import *
import torch
import torch.nn as nn

from utils import CsvAvitoProvider

trainer = Trainer('avito', lambda x, y: torch.sqrt(nn.MSELoss().cuda()(x, y)),
                  lambda x, y: pow(mse(x, y), 0.5))  # fixme

trainer.set_output_watcher(RegressionWatcher(trainer.watcher))
# proper losses

DATA = '/mnt/data/competition_files/train_jpg/'

rgb_mean = (0.4914, 0.4822, 0.4465)
rgb_std = (0.2023, 0.1994, 0.2010)
BATCH_SIZE = 256
tsize = 224
EPOCH_NUM = 200

train_transform = Compose([  # CenterCrop(100),
    RandomResizedCrop(size=tsize, scale=(0.7, 1)),
    RandomRotation(degrees=20),
    ColorJitter(0.5, 0.1, 0.1, 0.1),
    RandomHorizontalFlip(),
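    # (snippet truncated here; presumably the pipeline closes with ToTensor()
    # and Normalize(rgb_mean, rgb_std), which is what the statistics defined
    # above are usually for, followed by the closing "])")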
Example #3
    plotting_iters=100,    # interval for logging graphs and policy rollouts
    # env_name=Env(),  # we are using a tiny environment here for testing
)
rollouts, policy = instantiate(params)
# policy.actor.load_state_dict(torch.load('./models/policy.pt'))

NUM_OF_PLAYERS = 2
config = {
    'num_players': NUM_OF_PLAYERS,
    'log_filename': './logs/policy_agent.log',
    'static_drawpile': False,
}
env = Env(config)
agents = [policy, policy]
env.set_agents(agents)
trainer = Trainer()
use_hints = True
rewards, deck_ends = trainer.train(env, rollouts, policy, params, use_hints=use_hints)

my_dict = {'multi_agent3': deck_ends}
with open('pickle_files/multi_agent3.pickle', 'wb') as f:
    pickle.dump(my_dict, f)

print("Training completed!")

torch.save(policy.actor.state_dict(), './models/policy_m3.pt')
# policy.actor.load_state_dict(torch.load('./models/policy.pt'))

evaluations = []
num_iter = 50
for i in range(num_iter):  # let's play 50 games
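    # (loop body truncated in this snippet; judging from Examples #6 and #7 it
    # presumably calls env.run_PG(policy) and appends
    # env.get_num_cards_in_drawpile() to evaluations)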
Example #4
    strides_3d=config["strides_3d"],
    num_channels_inv_projection=config["num_channels_inv_projection"],
    num_channels_projection=config["num_channels_projection"],
    mode=config["mode"]
)

model.print_model_info()

model = model.to(device)

if config["multi_gpu"]:
    model = torch.nn.DataParallel(model)

# Set up trainer for renderer
trainer = Trainer(device, model, lr=config["lr"],
                  rendering_loss_type=config["loss_type"],
                  ssim_loss_weight=config["ssim_loss_weight"])

dataloader = scene_render_dataloader(path_to_data=config["path_to_data"],
                                     batch_size=config["batch_size"],
                                     img_size=config["img_shape"],
                                     crop_size=128)

# Optionally set up test_dataloader
if config["path_to_test_data"]:
    test_dataloader = scene_render_dataloader(path_to_data=config["path_to_test_data"],
                                              batch_size=config["batch_size"],
                                              img_size=config["img_shape"],
                                              crop_size=128)
else:
    test_dataloader = None
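
For reference, a sketch of what the config dict consumed above might look like; the keys are the ones read in this snippet (the truncated part above may read more), and every value here is made up:

config = {
    "strides_3d": [2, 2, 2],                          # hypothetical values throughout
    "num_channels_inv_projection": [256, 512, 1024],
    "num_channels_projection": [512, 256, 128],
    "mode": "bilinear",
    "multi_gpu": False,
    "lr": 2e-4,
    "loss_type": "l2",
    "ssim_loss_weight": 0.05,
    "path_to_data": "/path/to/train_data",
    "batch_size": 16,
    "img_shape": (3, 128, 128),
    "path_to_test_data": "",   # empty string is falsy, so test_dataloader stays None
}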
Example #5
from training.training import Trainer
from the_game import RandomAgent
from the_game import Env
NUM_PLAYERS = 1
agents = []
for i in range(NUM_PLAYERS):
    agents.append(RandomAgent(i))

params = {
    'rollout_size': 500,
    'num_updates': 5,
    'discount': 0.99,
    'plotting_iters': 10,
    'env_name': '1p',
}
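# build a Trainer once just to measure the size of a parsed observation,
# then rebuild it below with obs_size passed in explicitly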
trainer = Trainer(NUM_PLAYERS, agents, params)
OBS_SIZE = len(trainer.parse_state(trainer.reset_game()[0]))
trainer = Trainer(NUM_PLAYERS, agents, params, obs_size=OBS_SIZE)
rewards, success_rate = trainer.train()
print(rewards)
Example #6
    # env_name=Env(),  # we are using a tiny environment here for testing
)

NUM_OF_PLAYERS = 1

config = {
    'num_players': NUM_OF_PLAYERS,
    'log_filename': './logs/policy_agent.log',
    'static_drawpile': False,
}
logging.basicConfig(filename=config['log_filename'],
                    filemode='w',
                    level=logging.INFO)
env = Env(config)

rollouts, dueling_dqn = instantiate(params)
trainer = Trainer()
rewards, deck_ends = trainer.train(env, rollouts, dueling_dqn, params)
print("Training completed!")

torch.save(dueling_dqn.Q.state_dict(), './models/duelingDQN.pt')

evaluations = []
num_iter = 50
for i in range(num_iter):  # let's play 50 games
    env.run_PG(dueling_dqn)
    evaluations.append(env.get_num_cards_in_drawpile())
print('GAME OVER!')
plot_learning_curve(deck_ends, params.num_updates)
plot_testing(evaluations, num_iter)
Example #7
    plotting_iters=100,    # interval for logging graphs and policy rollouts
    # env_name=Env(),  # we are using a tiny environment here for testing
)

NUM_OF_PLAYERS = 1

config = {
    'num_players': NUM_OF_PLAYERS,
    'log_filename': './logs/policy_agent.log',
    'static_drawpile': False,
}
logging.basicConfig(filename=config['log_filename'], filemode='w', level=logging.INFO)
env = Env(config)

rollouts, policy = instantiate(params)
trainer = Trainer()
rewards, deck_ends = trainer.train(env, rollouts, policy, params)

my_dict = {'single_agent': deck_ends}
with open('pickle_files/single_agent.pickle', 'wb') as f:
    pickle.dump(my_dict, f)
    
print("Training completed!")

torch.save(policy.actor.state_dict(), './models/policy.pt')

evaluations = []
num_iter = 50
for i in range(num_iter):  # let's play 50 games
    env.run_PG(policy)
    evaluations.append(env.get_num_cards_in_drawpile())
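# (snippet ends here; Example #6 shows the usual continuation: a completion
# message plus plot_learning_curve(...) and plot_testing(...))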
Example #8
            bce(pred, ground_truth.unsqueeze(1)) +
            dice_loss(pred.view(-1), ground_truth.view(-1))))


def mymetric(x, y):
    m = (x > THRESH).float()
    return iou(m, y)


model = Res_Deeplab(NoLabels=1).float().to(DEVICE)

optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                    model.parameters()),
                             lr=1e-3)  #, momentum=0.975)

trainer = Trainer('salt', deeplab_4loss, mymetric, optimizer, 'deeplab',
                  DEVICE)
dataset = SegmentationDataset(MyTransform(),
                              SegmentationPathProvider(),
                              x_reader=OpencvReader(),
                              y_reader=OpencvGrayscaleReader())

train_loader = DataLoader(dataset, batch_size=BATCH_SIZE)
dataset.setmode('val')
val_loader = DataLoader(dataset, batch_size=BATCH_SIZE)
dataset.setmode('train')
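# note: train_loader and val_loader wrap the same dataset object, so whichever
# mode is set when a loader is iterated determines the split it yields;
# presumably the Trainer toggles setmode() as needed during validation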

#model = nn.DataParallel(Res_Deeplab(NoLabels=1)).float().cuda()

# training loop: one pass over the training data, then a validation pass, per epoch
for epoch in range(EPOCHS):
    trainer.train(train_loader, model, epoch)
    trainer.validate(val_loader, model)