Example #1
    def __init__(self, gamma, decay_rate, size_x, size_y, use_deep_learning):

        self.iter = 0

        # Reinforcement learning parameters
        self.gamma = gamma
        self.decay_rate = decay_rate

        # Game parameters
        self.size_x = size_x
        self.size_y = size_y

        # Training data
        self.reward = 0

        # Actual situation
        self.state = None
        self.action = None
        self.direction = None
        self.snake_pos = None
        self.snake_body = None
        self.food_pos = None

        # Scores
        self.sum_score = 0
        self.best_score = 0
        self.avg_scores = []
        self.best_scores = []

        # GFX
        self.visual = Visual(size_x, size_y)

        # deep or not
        self.use_deep_learning = use_deep_learning
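
Across these examples, the Visual renderer's interface amounts to a constructor taking the grid dimensions and a draw() call (see Example #6 below). The following headless stand-in is a hypothetical sketch for running the trainer without graphics, not the project's actual implementation:

class Visual:
    """Hypothetical stand-in for the project's Visual renderer."""

    def __init__(self, size_x, size_y):
        # Grid dimensions, as passed in the constructor call above.
        self.size_x = size_x
        self.size_y = size_y

    def draw(self, snake_body, food_pos, score, speed):
        # The real class presumably renders the board at the given speed;
        # this stub just prints a one-line summary for headless runs.
        print("score=%d head=%s food=%s" % (score, snake_body[0], food_pos))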
Example #2
File: main.py Project: rjmalves/lpoe
def main():
    # Configure the program's command-line interface (CLI)
    str_descrip = ("Performs an energy planning study " +
                   "based on tabular input files.\n")
    parser = argparse.ArgumentParser(description=str_descrip)
    parser.add_argument("entradas",
                        type=str,
                        nargs="+",
                        help="list of relative paths to the input files")
    parser.add_argument("-l",
                        "--log",
                        dest="l",
                        type=str,
                        default="WARNING",
                        help="desired logging level for the run")
    parser.add_argument("-s",
                        "--saida",
                        dest="s",
                        type=str,
                        default="results/",
                        help="root directory for the output files")
    # Extract the parameters supplied for this run
    args = parser.parse_args()
    if args.l not in ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]:
        raise Exception("Invalid LOG level supplied")

    # Update the desired LOG level
    LOG_LEVEL = args.l
    coloredlogs.install(logger=logger, level=args.l)

    # Start execution
    logger.info("#### STUDY OF ENERGY PLANNING MODELS ####")
    logger.info("------------------------------------------------------")
    resultados: List[Resultado] = []
    for entrada in args.entradas:

        e = LeituraEntrada(entrada, LOG_LEVEL)
        e.le_arquivo()
        # Determine the solution method
        metodo = Metodo.obtem_metodo_pelo_nome(e.cfg.metodo)
        resultados.append(metodo.resolve(e, LOG_LEVEL))

    # Generate output reports and plots
    for resultado in resultados:
        caminho_saida = os.path.join(
            args.s, "{}/{}/".format(resultado.cfg.nome, int(time.time())))
        relator = EscreveSaida(resultado, caminho_saida, LOG_LEVEL)
        relator.escreve_relatorio()
        visualizador = Visual(resultado, caminho_saida, LOG_LEVEL)
        visualizador.visualiza()
    caminho_saida = os.path.join(args.s, "multi/{}/".format(int(time.time())))
    visualizador = MultiVisual(resultados, caminho_saida, LOG_LEVEL)
    visualizador.visualiza()
    logger.info("#### END OF EXECUTION ####")
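
For reference, the parser built in main() can be exercised without the shell by handing parse_args an explicit argument list; a minimal sketch with hypothetical input file names:

import argparse

# Rebuild the same three options as main() above.
parser = argparse.ArgumentParser(description="Energy planning study")
parser.add_argument("entradas", type=str, nargs="+",
                    help="list of relative paths to the input files")
parser.add_argument("-l", "--log", dest="l", type=str, default="WARNING")
parser.add_argument("-s", "--saida", dest="s", type=str, default="results/")

# parse_args accepts an explicit argv list, which makes the CLI unit-testable.
args = parser.parse_args(["caso1.txt", "caso2.txt", "-l", "INFO", "-s", "out/"])
assert args.entradas == ["caso1.txt", "caso2.txt"]
assert args.l == "INFO" and args.s == "out/"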
Example #3

    # The snippet opens mid-call; presumably NVIDIA Apex's amp.initialize,
    # given the opt_level/loss_scale keywords and the DDP wrapper below.
    model_ft, optimizer_ft = amp.initialize(model_ft,
                                            optimizer_ft,
                                            opt_level='O1',
                                            loss_scale=128.0)
    # model_ft.load_state_dict(torch.load(config_map['resume_from_path']))
    # model_p = nn.DataParallel(model_ft, device_ids=train_cfg.gpu_ids)
    model_p = DDP(model_ft)

    if train_cfg.resume_from_path:
        print("resume from %s" % (train_cfg.resume_from_path))
        # model_p.load_state_dict(torch.load(train_cfg.resume_from_path))
        model_p, optimizer_ft, train_cfg.resume_epoch = load_checkpoint(
            model_p, optimizer_ft, train_cfg.resume_from_path)

    logger = create_logger(train_cfg.model_bpath, train_cfg.log_name)

    my_vis = Visual(train_cfg.model_bpath, log_to_file=train_cfg.vis_log)

    # Observe that all parameters are being optimized

    # Setup the loss fxn
    # criterion = nn.CrossEntropyLoss()
    criterion = ContrastiveLoss()

    dataloaders = {}
    train_loader = get_dataloader(CustomSiameseIterator, SiamesePipeline,
                                  out_map, train_cfg, True)
    test_loader = get_dataloader(CustomSiameseIterator, SiamesePipeline,
                                 out_map, train_cfg, False)

    # data_len = int(len(test_dataset) / train_cfg.batch_size)
    logger.info('the dataset has %d images' %
Example #4
backbone_net_p = nn.DataParallel(FCN_net, device_ids=config_map['gpu_ids'])
if config_map['resume_from_path']:
    backbone_net_p.load_state_dict(torch.load(config_map['resume_from_path']))

summary(backbone_net_p, (3, 448, 448), batch_size=config_map['batch_size'])

optimizer = torch.optim.SGD(backbone_net_p.parameters(),
                            lr=learning_rate,
                            momentum=0.99)  # , weight_decay=5e-4)

if not os.path.exists(config_map['base_save_path']):
    os.makedirs(config_map['base_save_path'])

logger = create_logger(config_map['base_save_path'], config_map['log_name'])

my_vis = Visual(config_map['base_save_path'],
                log_to_file=config_map['vis_log_path'])

# backbone_net_p.load_state_dict(torch.load('densenet_sgd_S7_yolo.pth'))

backbone_net_p.train()
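
# The Normalize statistics below are the standard ImageNet channel means and
# standard deviations, the usual choice when fine-tuning an ImageNet-pretrained
# backbone.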

transform = transforms.Compose([
    transforms.Lambda(cv_resize),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

train_dataset = FCNDataset(list_file=config_map['train_txt_path'],
                           train=True,
                           transform=transform,
                           device=device,
Example #5
with_sgd = True
optimizer = torch.optim.SGD(backbone_net_p.parameters(), lr=learning_rate, momentum=0.99) # , weight_decay=5e-4)
opt_name = 'sgd'

if not with_sgd:
    optimizer = torch.optim.Adam(backbone_net_p.parameters(), lr=learning_rate, weight_decay=1e-8)
    opt_name = 'adam'

base_save_path = '%s_%s_cellSize%d/'%(backbone_type, opt_name, S)
if not os.path.exists(base_save_path):
    os.makedirs(base_save_path)

log_name = 'train'
logger = create_logger(base_save_path, log_name)

my_vis = Visual(base_save_path[:-1])

# backbone_net_p.load_state_dict(torch.load('densenet_sgd_S7_yolo.pth'))
lossLayer = YOLOLossV1(batch_size, S, B, clsN, lbd_coord, lbd_no_obj, _logger=logger, _vis=my_vis)

backbone_net_p.train()

transform = transforms.Compose([
    transforms.Lambda(cv_resize),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

data_base = 'datasets/'
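
The with_sgd toggle above builds an SGD optimizer and then discards it whenever Adam is selected. An equivalent sketch that constructs only the requested optimizer (same hyperparameters; backbone_net_p and learning_rate as defined in the example):

import torch

def build_optimizer(params, learning_rate, with_sgd=True):
    # Same settings as the snippet above: SGD with momentum 0.99,
    # or Adam with a small weight decay.
    if with_sgd:
        return 'sgd', torch.optim.SGD(params, lr=learning_rate, momentum=0.99)
    return 'adam', torch.optim.Adam(params, lr=learning_rate, weight_decay=1e-8)

# opt_name, optimizer = build_optimizer(backbone_net_p.parameters(), learning_rate)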
Example #6
class abcTrain(ABC):
    def __init__(self, gamma, decay_rate, size_x, size_y, use_deep_learning):

        self.iter = 0

        # Reinforcement learning parameters
        self.gamma = gamma
        self.decay_rate = decay_rate

        # Game parameters
        self.size_x = size_x
        self.size_y = size_y

        # Training data
        self.reward = 0

        # Actual situation
        self.state = None
        self.action = None
        self.direction = None
        self.snake_pos = None
        self.snake_body = None
        self.food_pos = None

        # Scores
        self.sum_score = 0
        self.best_score = 0
        self.avg_scores = []
        self.best_scores = []

        # GFX
        self.visual = Visual(size_x, size_y)

        # deep or not
        self.use_deep_learning = use_deep_learning

    def _reinit(self):
        x_init = random.randrange(4, self.size_x - 3)
        y_init = random.randrange(2, self.size_y - 3)
        self.snake_pos = [x_init, y_init]
        self.snake_body = [[x_init, y_init], [x_init - 1, y_init],
                           [x_init - 2, y_init]]

        self.food_pos = [
            random.randrange(1, self.size_x - 1),
            random.randrange(1, self.size_y - 1)
        ]
        self.direction = Direction.RIGHT
        self.state = self.get_state()
        self.action = self.choose_action(self.state)
        self.iter += 1

    @abstractmethod
    def update_hyperparameters(self):
        pass

    @abstractmethod
    def get_state(self):
        pass

    @abstractmethod
    def choose_action(self, state):
        pass

    # To be implemented in SARSA and Q-learning train
    def update_q_dict(self, state, state2, action, action2):
        pass

    # To be implemented in DQL train
    def handle_deep_learning(self, state1, state2, action1, reward):
        pass

    @abstractmethod
    def print_recap(self):
        pass

    def save_data(self):
        self.avg_scores.append(self.sum_score / self.decay_rate)
        self.best_scores.append(self.best_score)

    def iterate(self,
                visual=False,
                speed=0,
                test=False,
                reward_plus=100,
                reward_minus=-200,
                nb_step_max=250):
        if visual and speed == 0:
            raise ValueError("Usage: train.iterate(visual=True, speed=speed)")

        self._reinit()
        if self.iter % self.decay_rate == 0:
            self.print_recap()
            if not test:
                self.update_hyperparameters()
            self.save_data()
            self.best_score = 0
            self.sum_score = 0

        nb_step_wo_food = 0
        score = 0
        exit = False

        while not exit:
            nb_step_wo_food += 1

            if nb_step_wo_food >= nb_step_max:
                nb_step_wo_food = 0
                exit = True
                break

            new_state = self.get_state()
            new_action = self.choose_action(new_state)

            self.reward = 0

            # Make sure the snake cannot move in the opposite direction
            if not self.direction.is_opposite(new_action):
                self.direction = new_action

            # Move the snake
            self.direction.move(self.snake_pos)

            # Snake body growing mechanism
            self.snake_body.insert(0, list(self.snake_pos))
            if self.snake_pos == self.food_pos:
                score += 1
                nb_step_wo_food = 0
                self.reward = reward_plus
            else:
                self.snake_body.pop()

            # Spawning food on the screen
            if nb_step_wo_food == 0:
                self.food_pos = [
                    random.randrange(1, self.size_x),
                    random.randrange(1, self.size_y)
                ]

            # GFX
            if visual:
                self.visual.draw(self.snake_body, self.food_pos, score, speed)

            # Update data
            if not test:
                if self.use_deep_learning:
                    self.handle_deep_learning(self.state, new_state,
                                              self.action, self.reward)
                else:
                    self.update_q_dict(self.state, new_state, self.action,
                                       new_action)
            self.action = new_action
            self.state = new_state

            # Game Over conditions
            # - Getting out of bounds
            if (self.snake_pos[0] < 0 or self.snake_pos[0] > self.size_x - 1
                    or self.snake_pos[1] < 0
                    or self.snake_pos[1] > self.size_y - 1):
                self.reward = reward_minus
                exit = True
                break

            # - Touching the snake body
            for block in self.snake_body[1:]:
                if self.snake_pos == block:
                    self.reward = reward_minus
                    nb_step_wo_food = 0
                    exit = True
                    break

        self.sum_score += score
        if score > self.best_score:
            self.best_score = score

        return score
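
abcTrain is abstract, so it is only used through subclasses that fill in the policy. A minimal hypothetical subclass for smoke-testing iterate(), assuming the project's Direction is an enum of the four moves and is iterable:

import random

class RandomTrain(abcTrain):
    """Hypothetical subclass: random policy, useful only to exercise iterate()."""

    def update_hyperparameters(self):
        pass  # a random agent has nothing to decay

    def get_state(self):
        # Trivial state: head position relative to the food.
        return (self.snake_pos[0] - self.food_pos[0],
                self.snake_pos[1] - self.food_pos[1])

    def choose_action(self, state):
        # A real agent would consult its Q-table or network here.
        return random.choice(list(Direction))

    def print_recap(self):
        print("iter=%d best=%d avg=%.2f" %
              (self.iter, self.best_score, self.sum_score / self.decay_rate))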
Example #7
parser = argparse.ArgumentParser(description='YOLO V1 Training params')
parser.add_argument('--config',
                    default='configs/resnet50_yolo_style_fpn_yolov3.py')
args = parser.parse_args()

config_map = Config()
train_config = config_map.train_config
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# learning_rate = init_lr(config_map)
if not os.path.exists(train_config['base_save_path']):
    os.makedirs(train_config['base_save_path'])

logger = create_logger(train_config['base_save_path'],
                       train_config['log_name'])

my_vis = Visual(train_config['base_save_path'],
                log_to_file=train_config['vis_log_path'])

# yolo = init_model(config_map)
yolo = YOLO(config_map, logger=logger, vis=my_vis)
yolo_p = nn.DataParallel(yolo.to(device), device_ids=train_config['gpu_ids'])
if train_config['resume_from_path']:
    yolo_p.load_state_dict(torch.load(train_config['resume_from_path']))

print(yolo_p)
# summary(yolo_p, (3, 416, 416), batch_size=train_config['batch_size'])
# exit()
lr0 = train_config['lbd_map']['lr0']
lrf = train_config['lbd_map']['lrf']
momentum = train_config['lbd_map']['momentum']
weight_decay = train_config['lbd_map']['weight_decay']
resume_epoch = train_config['resume_epoch']
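
A plausible continuation (a sketch, not the project's actual next lines) builds the optimizer from the hyperparameters just unpacked; lrf would typically feed a learning-rate schedule and is unused here:

optimizer = torch.optim.SGD(yolo_p.parameters(),
                            lr=lr0,
                            momentum=momentum,
                            weight_decay=weight_decay)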
Example #8
    # Print the model we just instantiated
    # summary(model_p, (3, img_input_size, img_input_size))

    # Send the model to GPU

    # Gather the parameters to be optimized/updated in this run. If we are
    #  finetuning we will be updating all parameters. However, if we are 
    #  doing feature extract method, we will only update the parameters
    #  that we have just initialized, i.e. the parameters with requires_grad
    #  is True.

    logger = create_logger(config_map['model_save_path'],
                           config_map['log_name'])

    my_vis = Visual(config_map['model_save_path'],
                    log_to_file=config_map['vis_log_path'])

    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(model_p.parameters(),
                             lr=0.1 * config_map['batch_size'] / 256.0,
                             momentum=0.9)
    # optimizer_ft = optim.RMSprop(params_to_update, momentum=0.9)
    # optimizer_ft = optim.Adam(model_p.parameters(), lr=1e-2, eps=1e-8, betas=(0.9, 0.99), weight_decay=0.)
    # optimizer_ft = optim.Adadelta(params_to_update, lr=1)

    # Setup the loss fxn
    criterion = nn.CrossEntropyLoss()

    dataloaders = {}
    train_dataset = ClassifyDataset(base_data_path=config_map['train_data_path'],
                                    train=True,
                                    transform=data_transforms['train'],
                                    id_name_path=config_map['id_name_txt'],
                                    device=device,
                                    little_train=False)
    train_loader = DataLoader(train_dataset,
                              batch_size=config_map['batch_size'],
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True)
    test_dataset = ClassifyDataset(base_data_path=config_map['test_data_path'],
                                   train=False,
                                   transform=data_transforms['val'],
                                   id_name_path=config_map['id_name_txt'],
                                   device=device,
                                   little_train=False,
                                   with_file_path=False)
    test_loader = DataLoader(test_dataset,
                             batch_size=config_map['batch_size'],
                             shuffle=True,
                             num_workers=4,
                             pin_memory=True)
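
The empty dataloaders dict above is presumably populated with the two loaders; a sketch of the likely continuation (the 'train'/'val' keys are an assumption, mirroring the data_transforms keys used above):

    dataloaders['train'] = train_loader
    dataloaders['val'] = test_loader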