Example 1
from ray.tune.logger import pretty_print  # available for pretty-printing nested configs
from config.config import load_config, print_config

# Load the experiment config from YAML and print it
cfg = load_config("./config/config.yml")
print_config(cfg)

# Check that YAML parsing produced the expected native Python types
print("seed:", type(cfg["seed"]), cfg["seed"])
print("rllib_config:monitor:", type(cfg["rllib_config"]["monitor"]))
print("rllib_config:lr:", type(cfg["rllib_config"]["lr"]))
print("timesteps_total:", type(cfg["timesteps_total"]))
print("env_config:resized_input_shape:",
      type(cfg["env_config"]["resized_input_shape"]))
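`load_config` here is presumably a thin wrapper around PyYAML, which is what gives each entry its native Python type (int for `seed`, bool for `monitor`, float for `lr`, list for `resized_input_shape`). A minimal sketch of such a loader, assuming plain PyYAML; the repo's actual `config.config` module may do more (defaults, validation, merging):

# Hypothetical stand-in for config.config; labeled as an assumption.
import yaml

def load_config(path):
    # yaml.safe_load converts YAML scalars to native Python types,
    # e.g. `seed: 1234` -> int, `monitor: true` -> bool, `lr: 5.0e-5` -> float
    with open(path) as f:
        return yaml.safe_load(f)

def print_config(cfg):
    # Dump the nested dict back to YAML for readable logging
    print(yaml.dump(cfg, default_flow_style=False))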
Example 2
import os
import time

import scipy.io as sio
import torch
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader

# NOTE: Hyperspectral_Dataset, test, MODEL_SIGNATURE and CUDA_AVAILABLE are
# project-level names assumed to be defined/imported elsewhere in this module.


def train(config, kwargs):
    # IMPORT MODEL==========================================================================
    if config.model_name == 'C3F4_CNN':
        from Model.C3F4_CNN import C3F4_CNN as Model
    else:
        raise Exception('Wrong name of the model!')
    
    # DIRECTORY FOR SAVING==================================================================
    snapshots_path = os.getcwd() + '/snapshots/patch_size' + str(config.patch_size) + '/'
    save_dir = snapshots_path + config.model_name + '_' + MODEL_SIGNATURE + '/'  # avoid shadowing the builtin `dir`
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    path_name_current_fold = save_dir + config.model_name
    
    # PRINT PARAMETERS=======================================================================
    config.print_config()
    with open(path_name_current_fold + '.txt', 'a') as f:
        print('#######################PARAMETERS#######################\n'
          '# Dataset selection\n'
          '\tmaxTrain: {}\n'
          '\tmax_trainData: {}\n'
          '\tCBLoss_gamma: {}\n'
          
          '# train/test parameters\n'
          '\tmodel_name: {}\n'
          '\toptimizer: {}\n'
          '\tepochs: {}\n'
          '\tbatch_size: {}\n'
          '\tseed: {}\n'
          '\tlr: {}\n'
          '\tweight_decay: {}\n'
          
          '# data preparation parameters\n'
          '\tdataset: {}\n'
          '\tpatch_size: {}\n'
          '\tband: {}\n'
          '\tnum_classes: {}\n'
          '\ttrain_percent: {}\n'
          '\tval_percent: {}\n'
          '\ttest_percent: {}\n'.format(
              config.maxTrain, config.max_trainData, config.CBLoss_gamma, config.model_name,
              config.optimizer, config.epochs, config.batch_size, config.seed,
              config.lr, config.weight_decay, config.dataset, config.patch_size,
              config.band, config.num_classes, config.train_percent, config.val_percent,
              config.test_percent), file=f)
    
    # LOAD TRAINING DATA===================================================================
    # # data_augmentation
    # transform_train = T.Compose([T.RandomHorizontalFlip(),
    #                              T.RandomVerticalFlip(),
    #                              T.ToTensor()
    #                             ])
    print('\tload training data')
    train_dataloader = DataLoader(Hyperspectral_Dataset(config, train=True),
                                  batch_size=config.batch_size, shuffle=True, **kwargs)
    
    # CREATE AND IMPORT MODEL=============================================================
    print('\tcreate model')
    model = Model(config)
    # model.apply(weights_init)
    print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0))
    if CUDA_AVAILABLE:
        model.cuda()
    # print(model)
    with open(path_name_current_fold + '.txt', 'a') as f:
        print('#############################  MODEL  ###################################\n', file=f)
        print(model, file=f)
        print('##############################################################################\n', file=f)

    # INIT OPTIMIZER========================================================================
    print('\tinit optimizer')
    if config.optimizer == 'Adagrad':
        optimizer = optim.Adagrad(model.parameters(), lr=config.lr, lr_decay=0, weight_decay=config.weight_decay)
    elif config.optimizer == 'SGD':
        optimizer = optim.SGD(model.parameters(), lr=config.lr, weight_decay=config.weight_decay, momentum=0.9)
    elif config.optimizer == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=config.lr, betas=(0.9,0.999), weight_decay=config.weight_decay)
    else:
        raise Exception('Wrong name of the optimizer!')
    # decrease lr 
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=config.step_size, gamma=0.1)
    # scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40], gamma=0.1)

    # PERFORM TRAINING EXPERIMENT==========================================================
    print('\tperform experiment\n')

    loss_batchSize = []
    Best_OA = 0
    Best_AA = 0
    Best_Kappa = 0
    Best_accuracy_perclass = []
    for epoch in range(1, config.epochs+1):
        time_start = time.time()

        # set model in training mode
        model.train(True)
        train_loss = 0.
        train_number = 0.

        # start training
        for batch_idx, (train_images, train_labels) in enumerate(train_dataloader):
            # data preparation
            train_images, train_labels = train_images.type(torch.FloatTensor), train_labels.type(torch.LongTensor)
            if CUDA_AVAILABLE:
                train_images, train_labels = train_images.cuda(), train_labels.cuda()
            # Variable() is a no-op in modern PyTorch; kept for backward compatibility
            train_images, train_labels = Variable(train_images), Variable(train_labels)
            
            # reset gradients
            optimizer.zero_grad()
            # calculate loss
            loss = model.calculate_objective(train_images, train_labels)
            loss_batchSize.append(loss.item())
            train_loss += loss.item()
            train_number += len(train_labels)
            # backward pass
            loss.backward()
            # optimization
            optimizer.step()
        
        # step the LR scheduler once per epoch, after the optimizer updates
        # (required placement since PyTorch 1.1; it was at the top of the loop)
        scheduler.step()

        # calculate final loss (average loss per sample, scaled by 100)
        train_loss = train_loss / train_number * 100
        
        time_end = time.time()
        time_elapsed = time_end - time_start
        print('Epoch %d/%d| Time: %.2fs| Loss: %.4f'%(epoch, config.epochs, time_elapsed, train_loss))
        with open(path_name_current_fold + '.txt', 'a') as f:
            print('Epoch %d/%d| Time: %.2fs| Loss: %.4f'%(epoch, config.epochs, time_elapsed, train_loss), file=f)

        if epoch % 20 == 0:
            torch.save(model, path_name_current_fold + str(epoch) +'.model')
            print('>>--{} model saved--<<'.format(path_name_current_fold+str(epoch)+'.model'))
            with open(path_name_current_fold + '.txt', 'a') as f:
                print('>>--{} model saved--<<'.format(path_name_current_fold+str(epoch)+'.model'), file=f)
            # Calculate accuracy on the test dataset
            if config.dataset != 'garbage_crop_37' and config.dataset != 'img_crop_37_pool':
                OA, AA, Kappa, accuracy_perclass = test(config, kwargs, epoch, evaluate_model_assign=None, train_assign=False)
                if OA > Best_OA:
                    Best_OA, Best_AA, Best_Kappa, Best_accuracy_perclass = OA, AA, Kappa, accuracy_perclass
                elif OA == Best_OA:  # tie on OA: prefer better AA (original compared OA to Best_AA, a bug)
                    if AA >= Best_AA:
                        Best_OA, Best_AA, Best_Kappa, Best_accuracy_perclass = OA, AA, Kappa, accuracy_perclass
    sio.savemat(path_name_current_fold + '.mat', {'loss': loss_batchSize})
    return Best_OA, Best_AA, Best_Kappa, Best_accuracy_perclass
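A minimal driver sketch for the function above, assuming `config` has already been built (e.g. from the repo's argparse setup) with the attributes `train` reads; the `kwargs` dict carries the usual extra DataLoader arguments. This invocation is illustrative, not the repo's actual entry point:

# Hypothetical invocation; `config` is assumed to come from CLI parsing.
import torch

CUDA_AVAILABLE = torch.cuda.is_available()
# Common extra DataLoader arguments when training on GPU
kwargs = {'num_workers': 1, 'pin_memory': True} if CUDA_AVAILABLE else {}

Best_OA, Best_AA, Best_Kappa, Best_accuracy_perclass = train(config, kwargs)
print('Best OA: {:.4f} | Best AA: {:.4f} | Kappa: {:.4f}'.format(Best_OA, Best_AA, Best_Kappa))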
Example 3
###########################################################
# Restore training
if config['restore_seed'] >= 0:
    pretrained_config, checkpoint_path = \
        find_and_load_config_by_seed(config['restore_seed'],
                                     preselected_experiment_idx=config['restore_experiment_idx'],
                                     preselected_checkpoint_idx=config['restore_checkpoint_idx'])
    logger.warning("Overwriting config from {}".format(checkpoint_path))
    config = pretrained_config
    update_config(config, config_updates)
else:
    checkpoint_path = None

###########################################################
# Print config
print_config(config)

###########################################################
# Setup paths
paths = ArtifactPaths(config['experiment_name'],
                      config['seed'],
                      algo_name=config['algo'])

###########################################################
# Code backup
os.system('cp -ar ./duckietown_utils {}/'.format(paths.code_backup_path))
os.system('cp -ar ./experiments {}/'.format(paths.code_backup_path))
os.system('cp -ar ./config {}/'.format(paths.code_backup_path))

###########################################################
# Set up env and training config
# The indented fragment below appears to be the tail of a wheel-speed
# conversion helper that spilled into this script; it is reconstructed
# here as a complete function. The def line and the `vel` computation
# are assumptions; `math` and `numpy as np` must be imported at the top.
def wheel_speeds_to_action(omega_l, omega_r, radius, baseline):
    vel = (omega_l + omega_r) * radius / 2.0  # linear velocity (reconstructed)
    angle = (omega_r * radius - vel) / (0.5 * baseline)
    angle_backup = (vel - omega_l * radius) / (0.5 * baseline)
    # print("angle is {}, angle backup is {}".format(angle, angle_backup))
    assert math.isclose(angle, angle_backup)
    return np.array([vel, angle])

# Set up env
ray.init(**config["ray_init_config"])
register_env('Duckietown', launch_and_wrap_env)

###########################################################
# Restore agent
trainer = PPOTrainer(config=config["rllib_config"])
if checkpoint_path is not None:  # restore_seed < 0 leaves checkpoint_path unset
    trainer.restore(checkpoint_path)

print_config(trainer.config)

# add seed to env config
seed = args.seed

actions = []
print(config['env_config'])
total_reward = 0
###########################################################
###########################################################
# Simple demonstration of closed loop performance
if not (args.analyse_trajectories or args.visualize_salient_obj
        or args.reward_plots or args.visualize_dot_trajectories):
    # env = Monitor(env, "gym_monitor_results", write_upon_reset=True, force=True)
    env = launch_and_wrap_duckieenv(config["env_config"], seed)
    print(env)
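The snippet ends right after the env is constructed; the closed-loop demonstration it announces would continue roughly as below. This is a minimal sketch, assuming a Gym-style env and the classic RLlib `Trainer.compute_action` API; it is not part of the original script:

# Hypothetical continuation: roll the restored PPO policy out in the env,
# feeding the `actions` list and `total_reward` accumulator defined above.
obs = env.reset()
done = False
while not done:
    action = trainer.compute_action(obs)  # classic RLlib Trainer API
    actions.append(action)
    obs, reward, done, info = env.step(action)
    total_reward += reward
print("Episode return:", total_reward)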