Example #1
def run(args):
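    # Build the validation dataset and run a single evaluation epoch with
    # FabrecEval (cfg, VAL and FabrecEval are assumed to be imported at module
    # level in the original project).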

    if args.seed is not None:
        from utils.common import init_random
        init_random(args.seed)
    # log.info(json.dumps(vars(args), indent=4))

    datasets = {}
    dsname = args.dataset_val[0]
    root, cache_root = cfg.get_dataset_paths(dsname)
    dataset_cls = cfg.get_dataset_class(dsname)
    datasets[VAL] = dataset_cls(root=root,
                                cache_root=cache_root,
                                train=False,
                                test_split=args.test_split,
                                max_samples=args.val_count,
                                start=args.st,
                                use_cache=args.use_cache,
                                align_face_orientation=args.align,
                                crop_source=args.crop_source,
                                return_landmark_heatmaps=True,
                                landmark_sigma=args.sigma,
                                image_size=args.input_size)
    print(datasets[VAL])

    fntr = FabrecEval(datasets,
                      args,
                      args.sessionname,
                      workers=args.workers,
                      wait=args.wait)

    import torch
    torch.backends.cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms
    fntr.eval_epoch()
Example #2
def train(domain_name: str, log_to_file: bool, seed: int, train_epochs: int, train_dialogs: int,
          eval_dialogs: int, max_turns: int, train_error_rate: float, test_error_rate: float,
          lr: float, eps_start: float, grad_clipping: float, buffer_classname: str, 
          buffer_size: int, use_tensorboard: bool):
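    # Train a DQN dialog policy against a handcrafted user simulator and run an
    # evaluation pass after every training epoch (the imported classes are
    # assumed to come from the surrounding dialog-system project).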

    common.init_random(seed=seed)

    file_log_lvl = LogLevel.DIALOGS if log_to_file else LogLevel.NONE
    logger = DiasysLogger(console_log_lvl=LogLevel.RESULTS, file_log_lvl=file_log_lvl)
    if buffer_classname == "prioritized":
        buffer_cls = NaivePrioritizedBuffer
    elif buffer_classname == "uniform":
        buffer_cls = UniformBuffer
    else:
        raise ValueError(f"unknown buffer class: {buffer_classname}")

    domain = JSONLookupDomain(name=domain_name)
    bst = HandcraftedBST(domain=domain, logger=logger)
    user = HandcraftedUserSimulator(domain, logger=logger)
    noise = SimpleNoise(domain=domain, train_error_rate=train_error_rate, 
                        test_error_rate=test_error_rate, logger=logger)
    policy = DQNPolicy(domain=domain, lr=lr, eps_start=eps_start,
                       gradient_clipping=grad_clipping, buffer_cls=buffer_cls,
                       replay_buffer_size=buffer_size, train_dialogs=train_dialogs,
                       logger=logger)
    evaluator = PolicyEvaluator(domain=domain, use_tensorboard=use_tensorboard, 
                                experiment_name=domain_name, logger=logger)
    ds = DialogSystem(policy,
                      user,
                      noise,
                      bst,
                      evaluator)

    for epoch in range(train_epochs):
        # train one epoch
        ds.train()
        evaluator.start_epoch()
        for episode in range(train_dialogs):
            ds.run_dialog(max_length=max_turns)
        evaluator.end_epoch()   # important for statistics!
        ds.num_dialogs = 0  # IMPORTANT for epsilon scheduler in dqnpolicy
        policy.save()       # save model

        # evaluate one epoch
        ds.eval()
        evaluator.start_epoch()
        for episode in range(eval_dialogs):
            ds.run_dialog(max_length=max_turns)
        evaluator.end_epoch()   # important for statistics!
        ds.num_dialogs = 0 # IMPORTANT for epsilon scheduler in dqnpolicy
Example #3
def run(args):
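    # Build the train and validation datasets and start AAE landmark training,
    # or run a single evaluation epoch when --eval is set (TRAIN, VAL, DATASETS,
    # lmcfg and torch are assumed to be available at module level).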

    from utils.common import init_random

    if args.seed is not None:
        init_random(args.seed)
    # log.info(json.dumps(vars(args), indent=4))

    datasets = {}
    for phase, dsnames, num_samples in zip((TRAIN, VAL),
                                           (args.dataset_train, args.dataset_val),
                                           (args.train_count, args.val_count)):
        train = phase == TRAIN
        datasets[phase] = DATASETS[dsnames[0]](train=train,
                                               max_samples=num_samples,
                                               use_cache=args.use_cache,
                                               start=args.st,
                                               test_split=args.test_split,
                                               align_face_orientation=args.align,
                                               crop_source=args.crop_source,
                                               return_landmark_heatmaps=lmcfg.PREDICT_HEATMAP and not args.train_coords,
                                               # return_landmark_heatmaps=True,
                                               return_modified_images=args.mod and train,
                                               landmark_sigma=args.sigma,
                                               # landmark_ids=lmcfg.LANDMARKS,
                                               daug=args.daug)
        print(datasets[phase])

    fntr = AAELandmarkTraining(datasets, args,
                               session_name=args.sessionname,
                               batch_size=args.batchsize,
                               macro_batch_size=args.macro_batchsize,
                               snapshot_interval=args.save_freq,
                               snapshot=args.resume,
                               workers=args.workers,
                               wait=args.wait)

    torch.backends.cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms
    if args.eval:
        fntr.eval_epoch()
    else:
        fntr.train(num_epochs=args.epochs)
Example #4
log.console_level = log.LogLevel.RESULTS
log.file_level = log.LogLevel.DIALOGS
logger = log.DiasysLogger()
import random

from modules.policy.rl.experience_buffer import UniformBuffer, NaivePrioritizedBuffer

from dialogsystem import DialogSystem
from modules.bst import HandcraftedBST
from modules.policy import DQNPolicy
from modules.simulator import HandcraftedUserSimulator
from utils.domain.jsonlookupdomain import JSONLookupDomain
from modules.policy.evaluation import PolicyEvaluator

from utils import common

if __name__ == "__main__":
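    # Run several training trials, each with its own random seed, and collect the results.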
    common.init_random()

    TRAIN_EPISODES = 1000
    NUM_TEST_SEEDS = 10
    EVAL_EPISODES = 500
    MAX_TURNS = -1
    TRAIN_EPOCHS = 10

    # get #num_test_seeds random seeds
    random_seeds = []
    for i in range(NUM_TEST_SEEDS):
        random_seeds.append(random.randint(0, 2**32-1))

    results = {}
    for seed in random_seeds:
        common.init_once = False  # reset so common.init_random can seed again for this trial (assumption based on the flag name)
Example #5
def extract_main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--st', default=None, type=int)
    parser.add_argument('--nd', default=None, type=int)
    parser.add_argument('--split', default='train')
    args = parser.parse_args()
    extract_features(args.split, st=args.st, nd=args.nd)


if __name__ == '__main__':
    # extract_main()
    # exit()
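    # Quick sanity check: load a few VggFace2 samples, wrap them in a Batch and
    # visualize them (VggFace2, td, ds_utils, vis and time are assumed to be
    # imported at module level).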
    from utils.nn import Batch
    import utils.common as util
    util.init_random()

    ds = VggFace2(train=True, deterministic=True, use_cache=False, align_face_orientation=False,
                  return_modified_images=False, image_size=256)
    micro_batch_loader = td.DataLoader(ds, batch_size=10, shuffle=True, num_workers=0)

    f = 1.0
    t = time.perf_counter()
    for batch_idx, data in enumerate(micro_batch_loader):
        print('t load:', time.perf_counter() - t)
        t = time.perf_counter()
        batch = Batch(data, gpu=False)
        print('t Batch:', time.perf_counter() - t)
        images = ds_utils.denormalized(batch.images)
        vis.vis_square(images, fx=f, fy=f, normalize=False, nCols=10, wait=0)
Example #6
def train(domain_name: str, log_to_file: bool, seed: int, train_epochs: int, train_dialogs: int,
          eval_dialogs: int, max_turns: int, train_error_rate: float, test_error_rate: float,
          lr: float, eps_start: float, grad_clipping: float, buffer_classname: str,
          buffer_size: int, use_tensorboard: bool):

    """
        Training loop for the RL policy, for information on the parameters, look at the descriptions
        of commandline arguments in the "if main" below
    """
    seed = seed if seed != -1 else None  # a seed of -1 means "no fixed seed"
    common.init_random(seed=seed)

    file_log_lvl = LogLevel.DIALOGS if log_to_file else LogLevel.NONE
    logger = DiasysLogger(console_log_lvl=LogLevel.RESULTS, file_log_lvl=file_log_lvl)

    summary_writer = SummaryWriter(log_dir='logs') if use_tensorboard else None
    
    if buffer_classname == "prioritized":
        buffer_cls = NaivePrioritizedBuffer
    elif buffer_classname == "uniform":
        buffer_cls = UniformBuffer
    else:
        raise ValueError(f"unknown buffer class: {buffer_classname}")

    domain = JSONLookupDomain(name=domain_name)
    
    bst = HandcraftedBST(domain=domain, logger=logger)
    user = HandcraftedUserSimulator(domain, logger=logger)
    # noise = SimpleNoise(domain=domain, train_error_rate=train_error_rate,
    #                     test_error_rate=test_error_rate, logger=logger)
    policy = DQNPolicy(domain=domain, lr=lr, eps_start=eps_start,
                       gradient_clipping=grad_clipping, buffer_cls=buffer_cls,
                       replay_buffer_size=buffer_size, train_dialogs=train_dialogs,
                       logger=logger, summary_writer=summary_writer)
    evaluator = PolicyEvaluator(domain=domain, use_tensorboard=use_tensorboard,
                                experiment_name=domain_name, logger=logger,
                                summary_writer=summary_writer)
    ds = DialogSystem(services=[user, bst, policy, evaluator], protocol='tcp')
    # ds.draw_system_graph()

    # Sanity-check the publish/subscribe messaging pipeline of the services before training.
    error_free = ds.is_error_free_messaging_pipeline()
    if not error_free:
        ds.print_inconsistencies()

    for j in range(train_epochs):
        # START TRAIN EPOCH
        evaluator.train()
        policy.train()
        evaluator.start_epoch()
        for episode in range(train_dialogs):
            if episode % 100 == 0:
                print("DIALOG", episode)
            logger.dialog_turn("\n\n!!!!!!!!!!!!!!!! NEW DIALOG !!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n")
            ds.run_dialog(start_signals={f'user_acts/{domain.get_domain_name()}': []})
        evaluator.end_epoch()
        policy.save()

        # START EVAL EPOCH
        evaluator.eval()
        policy.eval()
        evaluator.start_epoch()
        for episode in range(eval_dialogs):
            logger.dialog_turn("\n\n!!!!!!!!!!!!!!!! NEW DIALOG !!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n")
            ds.run_dialog(start_signals={f'user_acts/{domain.get_domain_name()}': []})
        evaluator.end_epoch()
    ds.shutdown()
Example #7
        default=25,
        type=int,
        help=
        "maximum turns per dialog (dialogs with more turns will be terminated and counting as failed"
    )

    parser.add_argument("-eer",
                        "--evalerror",
                        type=float,
                        default=0.0,
                        help="simulation error rate while evaluating")

    args = parser.parse_args()

    # init random generator and logger
    common.init_random(args.randomseed)
    file_log_lvl = LogLevel.DIALOGS if args.logtofile else LogLevel.NONE
    logger = DiasysLogger(console_log_lvl=LogLevel.RESULTS,
                          file_log_lvl=file_log_lvl)

    # choose 'real' domain name from shorthand
    if args.domain == 'courses':
        domain_name = 'ImsCourses'
    elif args.domain == 'lecturers':
        domain_name = 'ImsLecturers'
    else:
        parser.error(f"unknown domain shorthand: {args.domain}")

    test_hdc_usersim(domain_name=domain_name,
                     logger=logger,
                     eval_epochs=args.epochs,
                     eval_dialogs=args.evaldialogs,
                     max_turns=args.maxturns,
Example #8
evaluator = PolicyEvaluator(domain=super_domain,
                            use_tensorboard=True,
                            experiment_name="tutorial",
                            logger=dialogue_logger,
                            summary_writer=summary_writer)

# SET CONSTANTS
TRAIN_EPOCHS = 1
TRAIN_EPISODES = 1000
EVAL_EPISODES = 1000
MAX_TURNS = 25
bst = HandcraftedBST(domain=super_domain)
# Choose how many repeated trials
for i in range(1):
    common.init_random()  # add a seed as a parameter to init_random if wanted

    ds = DialogSystem(services=[user_sim, bst, policy, evaluator],
                      protocol='tcp')

    # Start train/eval loop
    for j in range(TRAIN_EPOCHS):
        # START TRAIN EPOCH
        evaluator.train()
        policy.train()
        evaluator.start_epoch()
        for episode in range(TRAIN_EPISODES):
            if episode % 100 == 0:
                print("DIALOG", episode)
            logger.dialog_turn(
                "\n\n!!!!!!!!!!!!!!!! NEW DIALOG !!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n"
Example #9
from utils import common

if __name__ == "__main__":
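    # Repeat DQN policy training several times per domain and collect per-run
    # turn counts and success rates.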
    logger = DiasysLogger(console_log_lvl=LogLevel.RESULTS,
                          file_log_lvl=LogLevel.DIALOGS)

    turns = {}
    success = {}
    for domain in [
            JSONLookupDomain(domain_str) for domain_str in ['ImsCourses']
    ]:
        turns[domain.name] = []
        success[domain.name] = []
        for i in range(5):
            common.init_random()  # add seed here if wanted

            TRAIN_EPOCHS = 10
            TRAIN_EPISODES = 1000
            EVAL_EPISODES = 1000
            MAX_TURNS = 25

            bst = HandcraftedBST(domain=domain, logger=logger)
            user = HandcraftedUserSimulator(domain, logger=logger)
            noise = SimpleNoise(domain=domain,
                                train_error_rate=0.30,
                                test_error_rate=0.30,
                                logger=logger)
            policy = DQNPolicy(domain=domain,
                               lr=0.0001,
                               eps_start=0.3,