Example #1
File: train.py Project: witchu/alphazero
def train(argv):
    import argparse
    parser = argparse.ArgumentParser(description='train nn from history file')
    parser.add_argument('game', help='a game name, e.g. checkers')
    parser.add_argument('history', help='history file')
    parser.add_argument('input', help='input model file name')
    parser.add_argument('output', help='output model file name')
    parser.add_argument('--progress',
                        action='store_true',
                        help='show progress bar')
    parser.add_argument('--epoch', default=1, type=int, help='training epochs')
    parser.add_argument('--batch', default=256, type=int, help='batch size')
    parser.add_argument('--block', default=100000, type=int, help='block size')
    parser.add_argument('--gpu', type=float, help='gpu memory fraction')
    args = parser.parse_args(argv)

    # set gpu memory
    if args.gpu is not None:
        import tensorflow as tf
        from keras.backend.tensorflow_backend import set_session

        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = args.gpu
        set_session(tf.Session(config=config))

    from util import game
    State = game.importState(args.game).State
    NN = game.importNn(args.game).NN

    state = State()
    nn = NN(args.input)

    for epoch in range(args.epoch):
        from util.trainer import train
        train(state, nn, args.history, {
            'batch': args.batch,
            'block': args.block,
            'progress': args.progress,
        })
        nn.save(args.output)
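
A minimal invocation sketch, assuming train() is dispatched from a command-line entry point (the wiring and file names below are hypothetical, not part of the original train.py):

import sys

if __name__ == '__main__':
    # Hypothetical example: python train.py checkers history.txt 000001.h5 000002.h5 --epoch 2
    train(sys.argv[1:])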
Example #2
File: loop.py Project: witchu/alphazero
def loop(argv):
    import argparse
    parser = argparse.ArgumentParser(description='loop generate and train')
    parser.add_argument('game', help='a game name, e.g. checkers')
    parser.add_argument('begin', default=1, type=int, help='begin with generation number')
    parser.add_argument('count', nargs='?', default=100, type=int, help='number of generations')
    parser.add_argument('-n', '--number', default=10000, type=int, help='number of generated states')
    parser.add_argument('-s', '--simulation', default=100, type=int, help='number of simulations per move')
    parser.add_argument('--hard', default=0, type=int, help='number of random moves')
    parser.add_argument('--soft', default=1000, type=int, help='number of random moves that depend on the visited node count')
    parser.add_argument('--epoch', default=1, type=int, help='training epochs')
    parser.add_argument('--batch', default=256, type=int, help='batch size')
    parser.add_argument('--block', default=100000, type=int, help='block size')
    parser.add_argument('--gpu', type=float, help='gpu memory fraction')
    args = parser.parse_args(argv)

    # set gpu memory
    if args.gpu is not None:
        import tensorflow as tf
        from keras.backend.tensorflow_backend import set_session

        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = args.gpu
        set_session(tf.Session(config=config))

    from util import game
    State = game.importState(args.game).State
    NN = game.importNn(args.game).NN

    state = State()
    state_no = args.begin
    nn = NN('{:06}.h5'.format(state_no))

    for i in range(args.count):
        history_filename = '{:06}.txt'.format(state_no)
        # generate
        with open(history_filename, 'a') as file:
            def save_to_file(result):
                file.write(result)
                file.write('\n')
                file.flush()
            callback = save_to_file
            from util.generator import generate
            generate(state, nn, callback, {
                'selfplay': args.number,
                'simulation': args.simulation,
                'hard_random': args.hard,
                'soft_random': args.soft,
                'progress': True,
            })
        # train
        for epoch in range(args.epoch):
            from util.trainer import train
            train(state, nn, history_filename, {
                'batch': args.batch,
                'block': args.block,
                'progress': True,
            })
        # next generation
        state_no += 1
        nn.save('{:06}.h5'.format(state_no))
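
A minimal invocation sketch for loop(), with hypothetical argument values (not part of the original loop.py):

import sys

if __name__ == '__main__':
    # Hypothetical example: python loop.py checkers 1 50 -n 5000 -s 200 --gpu 0.5
    loop(sys.argv[1:])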
Example #3
    train_data_loader = DataLoader(train_data,
                                   batch_size=batch_size,
                                   collate_fn=tr.collate)
    val_data_loader = DataLoader(val_data,
                                 batch_size=batch_size,
                                 collate_fn=tr.collate)
    test_data_loader = DataLoader(test_data,
                                  batch_size=batch_size,
                                  collate_fn=tr.collate)

    model = TGNN(cc.num_atom_feats, cc.num_bond_feats, 1).cuda()
    # optimizer = AdaBound(model.parameters(), lr=1e-6, weight_decay=1e-8)
    optimizer = optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-8)
    criterion = torch.nn.L1Loss()
    best_val_loss = 1e+8

    for epoch in range(0, num_epochs):
        train_loss = tr.train(model, optimizer, train_data_loader, criterion)
        val_loss, val_rmse, _, _, _ = tr.test(model, val_data_loader,
                                              criterion)
        test_loss, test_rmse, idxs, targets, preds = tr.test(
            model, test_data_loader, criterion)
        print(
            'Epoch [{}/{}]\tTrain loss: {:.4f}\tVal loss: {:.4f} ({:.4f})\tTest loss: {:.4f} ({:.4f})'
            .format(epoch + 1, num_epochs, train_loss, val_loss, val_rmse,
                    test_loss, test_rmse))

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_test_loss = test_loss
            best_test_rmse = test_rmse
            best_idxs = idxs
            best_targets = targets
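
The tail of this snippet keeps the test metrics from the epoch with the lowest validation loss. A standalone sketch of that bookkeeping pattern, with dummy numbers and illustrative names (not from the original file):

# (val_loss, test_loss, test_rmse) per epoch; dummy values for illustration only.
epoch_results = [(0.42, 0.45, 0.61), (0.35, 0.40, 0.55), (0.38, 0.39, 0.52)]

best_val_loss = float('inf')
best_snapshot = None
for epoch, (val_loss, test_loss, test_rmse) in enumerate(epoch_results):
    if val_loss < best_val_loss:
        # Remember the test metrics observed at the best validation epoch.
        best_val_loss = val_loss
        best_snapshot = {'epoch': epoch, 'test_loss': test_loss, 'test_rmse': test_rmse}
print(best_snapshot)  # {'epoch': 1, 'test_loss': 0.4, 'test_rmse': 0.55}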
Example #4
def classify(dev_id=None):
    # Prepare model related meta data
    mdl_name = args.model.lower().replace(' ', '_')
    common_cfg = cfgr('validate', 'common')
    pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
    config_kwargs = dict([(k, v) for k, v in args.__dict__.items() if not k.startswith('_') and k not in set(['dataset', 'model', 'template']) and v is not None and not callable(v)])
    config = Configurable(args.task, mdl_name, common_cfg=common_cfg, wsdir=PAR_DIR, **config_kwargs)
    params = pr('LM', config.lm_params) if mdl_name != 'none' else {}
    use_gpu = dev_id is not None
    tokenizer = config.tknzr.from_pretrained(params['pretrained_vocab_path'] if 'pretrained_vocab_path' in params else config.lm_mdl_name) if config.tknzr else {}
    _adjust_encoder(tokenizer, config)

    # Prepare task related meta data.
    task_path, task_type, task_dstype, task_cols, task_trsfm, task_extparms = config.input if config.input and os.path.isdir(os.path.join(DATA_PATH, config.input)) else config.task_path, config.task_type, config.task_ds, config.task_col, config.task_trsfm, config.task_ext_params
    ds_kwargs = config.ds_kwargs

    # Prepare data
    if (not config.distrb or config.distrb and hvd.rank() == 0): logging.info('Dataset path: %s' % os.path.join(DATA_PATH, task_path))
    train_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'train.%s' % config.fmt), tokenizer, config, **ds_kwargs)
    # Calculate the class weights if needed
    lb_trsfm = [x['get_lb'] for x in task_trsfm[1] if 'get_lb' in x]
    if (not config.weight_class or task_type == 'sentsim'):
        class_count = None
    elif len(lb_trsfm) > 0:
        lb_df = train_ds.df[task_cols['y']].apply(lb_trsfm[0])
        class_count = np.array([[1 if lb in y else 0 for lb in train_ds.binlb.keys()] for y in lb_df]).sum(axis=0)
    else:
        lb_df = train_ds.df[task_cols['y']]
        binlb = task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else train_ds.binlb
        class_count = lb_df.value_counts()[binlb.keys()].values
    if (class_count is None):
        class_weights = None
        sampler = None
    else:
        class_weights = torch.Tensor(1.0 / class_count)
        class_weights /= class_weights.sum()
        sampler = None # WeightedRandomSampler does not work in new version
        # sampler = WeightedRandomSampler(weights=class_weights, num_samples=config.bsize, replacement=True)
        if not config.distrb and type(dev_id) is list: class_weights = class_weights.repeat(len(dev_id))

    # Partition dataset among workers using DistributedSampler
    if config.distrb: sampler = torch.utils.data.distributed.DistributedSampler(train_ds, num_replicas=hvd.size(), rank=hvd.rank())

    train_loader = DataLoader(train_ds, batch_size=config.bsize, shuffle=sampler is None and config.droplast, sampler=sampler, num_workers=config.np, drop_last=config.droplast)

    # Classifier
    if (not config.distrb or config.distrb and hvd.rank() == 0):
        logging.info('Language model input fields: %s' % config.input_keys)
        logging.info('Classifier hyper-parameters: %s' % config.clf_ext_params)
        logging.info('Classifier task-related parameters: %s' % task_extparms['mdlaware'])
    if (config.resume):
        # Load model
        clf, prv_optimizer, resume, chckpnt = load_model(config.resume)
        if config.refresh:
            logging.info('Refreshing and saving the model with newest code...')
            try:
                if (not config.distrb or config.distrb and hvd.rank() == 0):
                    save_model(clf, prv_optimizer, '%s_%s.pth' % (config.task, config.model))
            except Exception as e:
                logging.warning(e)
        # Update parameters
        clf.update_params(task_params=task_extparms['mdlaware'], **config.clf_ext_params)
        if (use_gpu): clf = _handle_model(clf, dev_id=dev_id, distrb=config.distrb)
        # Construct optimizer
        optmzr_cls = config.optmzr if config.optmzr else (torch.optim.Adam, {}, None)
        optimizer = optmzr_cls[0](clf.parameters(), lr=config.lr, weight_decay=config.wdecay, **optmzr_cls[1]) if config.optim == 'adam' else torch.optim.SGD(clf.parameters(), lr=config.lr, momentum=0.9)
        if prv_optimizer: optimizer.load_state_dict(prv_optimizer.state_dict())
        training_steps = int(len(train_ds) / config.bsize) if hasattr(train_ds, '__len__') else config.trainsteps
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(config.wrmprop*training_steps), num_training_steps=training_steps) if not config.noschdlr and len(optmzr_cls) > 2 and optmzr_cls[2] and optmzr_cls[2] == 'linwarm' else None
        if (not config.distrb or config.distrb and hvd.rank() == 0): logging.info((optimizer, scheduler))
    else:
        # Build model
        lm_model, lm_config = gen_mdl(config, use_gpu=use_gpu, distrb=config.distrb, dev_id=dev_id)
        clf = gen_clf(config, lm_model, lm_config, num_lbs=len(train_ds.binlb) if train_ds.binlb else 1, mlt_trnsfmr=True if task_type in ['entlmnt', 'sentsim'] and task_extparms['mdlaware'].setdefault('sentsim_func', None) is not None else False, task_params=task_extparms['mdlaware'], binlb=train_ds.binlb, binlbr=train_ds.binlbr, use_gpu=use_gpu, distrb=config.distrb, dev_id=dev_id, **config.clf_ext_params)
        optmzr_cls = config.optmzr if config.optmzr else (torch.optim.Adam, {}, None)
        optimizer = optmzr_cls[0](clf.parameters(), lr=config.lr, weight_decay=config.wdecay, **optmzr_cls[1]) if config.optim == 'adam' else torch.optim.SGD(clf.parameters(), lr=config.lr, momentum=0.9)
        training_steps = int(len(train_ds) / config.bsize) if hasattr(train_ds, '__len__') else config.trainsteps
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config.wrmprop, num_training_steps=training_steps) if not config.noschdlr and len(optmzr_cls) > 2 and optmzr_cls[2] and optmzr_cls[2] == 'linwarm' else None
        if (not config.distrb or config.distrb and hvd.rank() == 0): logging.info((optimizer, scheduler))

    config.execute_all_callback()
    if config.verbose:
        logging.debug(config.__dict__)
        torch.autograd.set_detect_anomaly(True)
    if config.configfmt == 'yaml':
        config.to_yaml()
    else:
        config.to_json()

    if config.distrb:
        # Add Horovod Distributed Optimizer
        optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=clf.named_parameters())
        # Broadcast parameters from rank 0 to all other processes.
        hvd.broadcast_parameters(clf.state_dict(), root_rank=0)

    # Training
    train(clf, optimizer, train_loader, config, scheduler, weights=class_weights, lmcoef=config.lmcoef, clipmaxn=config.clipmaxn, epochs=config.epochs, earlystop=config.earlystop, earlystop_delta=config.es_delta, earlystop_patience=config.es_patience, use_gpu=use_gpu, devq=dev_id, distrb=config.distrb, resume=resume if config.resume else {})

    if config.distrb:
        if hvd.rank() == 0:
            clf = _handle_model(clf, dev_id=dev_id, distrb=False)
        else:
            return

    if config.noeval: return
    dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev.%s' % config.fmt), tokenizer, config, binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else train_ds.binlb, **ds_kwargs)
    dev_loader = DataLoader(dev_ds, batch_size=config.bsize, shuffle=False, num_workers=config.np)
    test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test.%s' % config.fmt), tokenizer, config, binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else train_ds.binlb, **ds_kwargs)
    test_loader = DataLoader(test_ds, batch_size=config.bsize, shuffle=False, num_workers=config.np)
    logging.debug(('binlb', train_ds.binlb, dev_ds.binlb, test_ds.binlb))

    # Evaluation
    eval(clf, dev_loader, config, ds_name='dev', use_gpu=use_gpu, devq=dev_id, distrb=config.distrb, ignored_label=task_extparms.setdefault('ignored_label', None))
    if config.traindev: train(clf, optimizer, dev_loader, config, scheduler=scheduler, weights=class_weights, lmcoef=config.lmcoef, clipmaxn=config.clipmaxn, epochs=config.epochs, earlystop=config.earlystop, earlystop_delta=config.es_delta, earlystop_patience=config.es_patience, use_gpu=use_gpu, devq=dev_id, distrb=config.distrb)
    eval(clf, test_loader, config, ds_name='test', use_gpu=use_gpu, devq=dev_id, distrb=config.distrb, ignored_label=task_extparms.setdefault('ignored_label', None))
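
The class-weighting step above builds inverse-frequency weights per label and normalizes them before handing them to the loss. An isolated sketch of that computation, using dummy counts (not taken from the original dataset):

import numpy as np
import torch

# Illustrative per-class label counts (dummy values).
class_count = np.array([120.0, 30.0, 50.0])

# Inverse-frequency weights normalized to sum to 1, as in the snippet above.
class_weights = torch.Tensor(1.0 / class_count)
class_weights /= class_weights.sum()
print(class_weights)  # rarer classes receive larger weights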
Example #5
def multi_clf(dev_id=None):
    '''Train multiple classifiers and use them to predict multiple sets of labels'''
    import inflect
    from bionlp.util import fs
    iflteng = inflect.engine()

    logging.info('### Multi Classifier Head Mode ###')
    # Prepare model related meta data
    mdl_name = args.model.lower().replace(' ', '_')
    common_cfg = cfgr('validate', 'common')
    pr = io.param_reader(os.path.join(PAR_DIR, 'etc', '%s.yaml' % common_cfg.setdefault('mdl_cfg', 'mdlcfg')))
    config_kwargs = dict([(k, v) for k, v in args.__dict__.items() if not k.startswith('_') and k not in set(['dataset', 'model', 'template']) and v is not None and not callable(v)])
    config = Configurable(args.task, mdl_name, common_cfg=common_cfg, wsdir=PAR_DIR, **config_kwargs)
    params = pr('LM', config.lm_params) if mdl_name != 'none' else {}
    use_gpu = dev_id is not None
    tokenizer = config.tknzr.from_pretrained(params['pretrained_vocab_path'] if 'pretrained_vocab_path' in params else config.lm_mdl_name) if config.tknzr else None
    task_type = config.task_type
    _adjust_encoder(tokenizer, config)
    special_tknids_args = dict(zip(special_tkns[0], special_tknids))
    task_trsfm_kwargs = dict(list(zip(special_tkns[0], special_tknids))+[('model',args.model), ('sentsim_func', args.sentsim_func), ('seqlen',args.maxlen)])
    # Prepare task related meta data.
    task_path, task_dstype, task_cols, task_trsfm, task_extparms = args.input if args.input and os.path.isdir(os.path.join(DATA_PATH, args.input)) else config.task_path, config.task_ds, config.task_col, config.task_trsfm, config.task_ext_params
    trsfms = (task_trsfm[0] if len(task_trsfm) > 0 else [])
    # trsfms_kwargs = ([] if args.model in LM_EMBED_MDL_MAP else ([{'seqlen':args.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}] if TASK_TYPE_MAP[args.task]=='nmt' else [{'seqlen':args.maxlen, 'trimlbs':task_extparms.setdefault('trimlbs', False), 'special_tkns':special_tknids_args}, task_trsfm_kwargs, {'seqlen':args.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}])) + (task_trsfm[1] if len(task_trsfm) >= 2 else [{}] * len(task_trsfm[0]))
    trsfms_kwargs = ([] if hasattr(config, 'embed_type') and config.embed_type else ([{'seqlen':args.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}] if config.task_type=='nmt' else [{'seqlen':args.maxlen, 'trimlbs':task_extparms.setdefault('trimlbs', False), 'required_special_tkns':['start_tknids', 'clf_tknids', 'delim_tknids'] if task_type in ['entlmnt', 'sentsim'] and (task_extparms.setdefault('sentsim_func', None) is None or not mdl_name.startswith('bert')) else ['start_tknids', 'clf_tknids'], 'special_tkns':special_tknids_args}, task_trsfm_kwargs, {'seqlen':args.maxlen, 'xpad_val':task_extparms.setdefault('xpad_val', 0), 'ypad_val':task_extparms.setdefault('ypad_val', None)}])) + (task_trsfm[1] if len(task_trsfm) >= 2 else [{}] * len(task_trsfm[0]))
    ds_kwargs = {'sampw':args.sample_weights, 'sampfrac':args.sampfrac}
    if task_type == 'nmt':
        ds_kwargs.update({'lb_coding':task_extparms.setdefault('lb_coding', 'IOB')})
    elif task_type == 'entlmnt':
        ds_kwargs.update(dict((k, task_extparms[k]) for k in ['origlb', 'lbtxt', 'neglbs', 'reflb'] if k in task_extparms))
    elif task_type == 'sentsim':
        ds_kwargs.update({'ynormfunc':task_extparms.setdefault('ynormfunc', None)})
    global_all_binlb = {}

    ext_params = dict([(k, getattr(args, k)) if hasattr(args, k) else (k, v) for k, v in config.clf_ext_params.items()])
    if hasattr(config, 'embed_type') and config.embed_type: ext_params['embed_type'] = config.embed_type
    task_params = dict([(k, getattr(args, k)) if hasattr(args, k) and getattr(args, k) is not None else (k, v) for k, v in task_extparms.setdefault('mdlcfg', {}).items()])
    logging.info('Classifier hyper-parameters: %s' % ext_params)
    logging.info('Classifier task-related parameters: %s' % task_params)
    orig_epochs = mltclf_epochs = args.epochs
    elapsed_mltclf_epochs, args.epochs = 0, 1
    if (args.resume):
        # Load model
        clf, prv_optimizer, resume, chckpnt = load_model(args.resume)
        if args.refresh:
            logging.info('Refreshing and saving the model with newest code...')
            try:
                save_model(clf, prv_optimizer, '%s_%s.pth' % (args.task, args.model))
            except Exception as e:
                logging.warning(e)
        elapsed_mltclf_epochs, all_binlb = chckpnt.setdefault('mltclf_epochs', 0), clf.binlb
        # Update parameters
        clf.update_params(task_params=task_params, **ext_params)
        if (use_gpu): clf = _handle_model(clf, dev_id=dev_id, distrb=args.distrb)
        # optmzr_cls = OPTMZR_MAP.setdefault(args.model.split('_')[0], (torch.optim.Adam, {}, None))
        optmzr_cls = config.optmzr if config.optmzr else (torch.optim.Adam, {}, None)
        optimizer = optmzr_cls[0](clf.parameters(), lr=args.lr, weight_decay=args.wdecay, **optmzr_cls[1]) if args.optim == 'adam' else torch.optim.SGD(clf.parameters(), lr=args.lr, momentum=0.9)
        if prv_optimizer: optimizer.load_state_dict(prv_optimizer.state_dict())
        training_steps = int(len(train_ds) / args.bsize) if hasattr(train_ds, '__len__') else args.trainsteps
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.wrmprop, num_training_steps=training_steps) if not args.noschdlr and len(optmzr_cls) > 2 and optmzr_cls[2] and optmzr_cls[2] == 'linwarm' else None
        logging.info((optimizer, scheduler))
    else:
        # Build model
        lm_model = gen_mdl(mdl_name, config, pretrained=True if type(args.pretrained) is str and args.pretrained.lower() == 'true' else args.pretrained, use_gpu=use_gpu, distrb=args.distrb, dev_id=dev_id) if mdl_name != 'none' else None
        clf = gen_clf(args.model, config, args.encoder, lm_model=lm_model, mlt_trnsfmr=True if task_type in ['entlmnt', 'sentsim'] and task_params.setdefault('sentsim_func', None) is not None else False, task_params=task_params, use_gpu=use_gpu, distrb=args.distrb, dev_id=dev_id, **ext_params)
        # optmzr_cls = OPTMZR_MAP.setdefault(args.model.split('_')[0], (torch.optim.Adam, {}, None))
        optmzr_cls = config.optmzr if config.optmzr else (torch.optim.Adam, {}, None)
        optimizer = optmzr_cls[0](clf.parameters(), lr=args.lr, weight_decay=args.wdecay, **optmzr_cls[1]) if args.optim == 'adam' else torch.optim.SGD(clf.parameters(), lr=args.lr, momentum=0.9)
        training_steps = int(len(train_ds) / args.bsize) if hasattr(train_ds, '__len__') else args.trainsteps
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.wrmprop, num_training_steps=training_steps) if not args.noschdlr and len(optmzr_cls) > 2 and optmzr_cls[2] and optmzr_cls[2] == 'linwarm' else None
        logging.info((optimizer, scheduler))

    # Prepare data
    logging.info('Dataset path: %s' % os.path.join(DATA_PATH, task_path))
    num_clfs = min([len(fs.listf(os.path.join(DATA_PATH, task_path), pattern=r'%s_\d.csv' % x)) for x in ['train', 'dev', 'test']])
    for epoch in range(elapsed_mltclf_epochs, mltclf_epochs):
        logging.info('Global %i epoch(s)...' % epoch)
        clf.reset_global_binlb()
        all_binlb = {}
        for i in range(num_clfs):
            logging.info('Training on the %s sub-dataset...' % iflteng.ordinal(i+1))
            train_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'train_%i.%s' % (i, args.fmt)), task_cols['X'], task_cols['y'], config.encode_func, tokenizer, config, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms else None, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
            new_lbs = [k for k in train_ds.binlb.keys() if k not in all_binlb]
            all_binlb.update(dict([(k, v) for k, v in zip(new_lbs, range(len(all_binlb), len(all_binlb)+len(new_lbs)))]))
            if mdl_name.startswith('bert'): train_ds = MaskedLMIterDataset(train_ds) if isinstance(train_ds, BaseIterDataset) else MaskedLMDataset(train_ds)
            lb_trsfm = [x['get_lb'] for x in task_trsfm[1] if 'get_lb' in x]
            if (not args.weight_class or task_type == 'sentsim'):
                class_count = None
            elif len(lb_trsfm) > 0:
                lb_df = train_ds.df[task_cols['y']].apply(lb_trsfm[0])
                class_count = np.array([[1 if lb in y else 0 for lb in train_ds.binlb.keys()] for y in lb_df]).sum(axis=0)
            else:
                lb_df = train_ds.df[task_cols['y']]
                binlb = task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else train_ds.binlb
                class_count = lb_df.value_counts()[binlb.keys()].values
            if (class_count is None):
                class_weights = None
                sampler = None
            else:
                class_weights = torch.Tensor(1.0 / class_count)
                class_weights /= class_weights.sum()
                class_weights *= (args.clswfac[min(len(args.clswfac)-1, i)] if type(args.clswfac) is list else args.clswfac)
                sampler = WeightedRandomSampler(weights=class_weights, num_samples=args.bsize, replacement=True)
                if type(dev_id) is list: class_weights = class_weights.repeat(len(dev_id))
            train_loader = DataLoader(train_ds, batch_size=args.bsize, shuffle=False, sampler=None, num_workers=args.np, drop_last=args.droplast)

            dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev_%i.%s' % (i, args.fmt)), task_cols['X'], task_cols['y'], config.encode_func, tokenizer, config, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else all_binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
            if mdl_name.startswith('bert'): dev_ds = MaskedLMIterDataset(dev_ds) if isinstance(dev_ds, BaseIterDataset) else MaskedLMDataset(dev_ds)
            dev_loader = DataLoader(dev_ds, batch_size=args.bsize, shuffle=False, num_workers=args.np)
            test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test_%i.%s' % (i, args.fmt)), task_cols['X'], task_cols['y'], config.encode_func, tokenizer, config, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else all_binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
            if mdl_name.startswith('bert'): test_ds = MaskedLMIterDataset(test_ds) if isinstance(test_ds, BaseIterDataset) else MaskedLMDataset(test_ds)
            test_loader = DataLoader(test_ds, batch_size=args.bsize, shuffle=False, num_workers=args.np)
            logging.debug(('binlb', train_ds.binlb, dev_ds.binlb, test_ds.binlb))

            # Adjust the model
            clf.get_linear(binlb=train_ds.binlb, idx=i)

            # Training on the split sub-datasets
            train(clf, optimizer, train_loader, config, special_tknids_args, scheduler=scheduler, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), weights=class_weights, lmcoef=args.lmcoef, clipmaxn=args.clipmaxn, epochs=args.epochs, earlystop=args.earlystop, earlystop_delta=args.es_delta, earlystop_patience=args.es_patience, task_type=task_type, task_name=args.task, mdl_name=args.model, use_gpu=use_gpu, devq=dev_id, resume=resume if args.resume else {}, chckpnt_kwargs=dict(mltclf_epochs=epoch))

            # Adjust the model
            clf_trnsfmr = MultiClfTransformer(clf)
            clf_trnsfmr.merge_linear(num_linear=i+1)
            clf.linear = _handle_model(clf.linear, dev_id=dev_id, distrb=args.distrb)

            # Evaluating on the accumulated dev and test sets
            eval(clf, dev_loader, config, dev_ds.binlbr, special_tknids_args, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=args.task, ds_name='dev', mdl_name=args.model, use_gpu=use_gpu, ignored_label=task_extparms.setdefault('ignored_label', None))
            eval(clf, test_loader, config, test_ds.binlbr, special_tknids_args, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=args.task, ds_name='test', mdl_name=args.model, use_gpu=use_gpu, ignored_label=task_extparms.setdefault('ignored_label', None))
        global_all_binlb.update(all_binlb)
        # clf.binlb = all_binlb
        # clf.binlbr = dict([(v, k) for k, v in all_binlb.items()])
    else:
        if orig_epochs > 0:
            try:
                save_model(clf, optimizer, '%s_%s.pth' % (args.task, args.model), devq=dev_id, distrb=args.distrb)
            except Exception as e:
                logging.warning(e)
    args.epochs = orig_epochs

    if args.noeval: return
    dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev.%s' % args.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer, config, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else all_binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
    if mdl_name.startswith('bert'): dev_ds = MaskedLMIterDataset(dev_ds) if isinstance(dev_ds, BaseIterDataset) else MaskedLMDataset(dev_ds)
    dev_loader = DataLoader(dev_ds, batch_size=args.bsize, shuffle=False, num_workers=args.np)
    test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test.%s' % args.fmt), task_cols['X'], task_cols['y'], config.encode_func, tokenizer, config, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms and type(task_extparms['binlb']) is not str else all_binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs, mltl=task_extparms.setdefault('mltl', False), **ds_kwargs)
    if mdl_name.startswith('bert'): test_ds = MaskedLMIterDataset(test_ds) if isinstance(test_ds, BaseIterDataset) else MaskedLMDataset(test_ds)
    test_loader = DataLoader(test_ds, batch_size=args.bsize, shuffle=False, num_workers=args.np)

    # Evaluation
    eval(clf, dev_loader, config, dev_ds.binlbr, special_tknids_args, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=args.task, ds_name='dev', mdl_name=args.model, use_gpu=use_gpu, ignored_label=task_extparms.setdefault('ignored_label', None))
    if args.traindev: train(clf, optimizer, dev_loader, config, special_tknids_args, scheduler=scheduler, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), weights=class_weights, lmcoef=args.lmcoef, clipmaxn=args.clipmaxn, epochs=orig_epochs, earlystop=args.earlystop, earlystop_delta=args.es_delta, earlystop_patience=args.es_patience, task_type=task_type, task_name=args.task, mdl_name=args.model, use_gpu=use_gpu, devq=dev_id)
    eval(clf, test_loader, config, test_ds.binlbr, special_tknids_args, pad_val=(task_extparms.setdefault('xpad_val', 0), train_ds.binlb[task_extparms.setdefault('ypad_val', 0)]) if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=args.task, ds_name='test', mdl_name=args.model, use_gpu=use_gpu, ignored_label=task_extparms.setdefault('ignored_label', None))
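
Each sub-dataset can introduce labels the model has not seen yet; the loop above merges them into a growing label-to-index map (all_binlb). A standalone sketch of that merge, with illustrative label names (not from the original data):

# Existing global label map and a new sub-dataset's label map (illustrative values).
all_binlb = {'A': 0, 'B': 1}
new_ds_binlb = {'B': 0, 'C': 1, 'D': 2}

# Append only unseen labels, assigning them the next free indices.
new_lbs = [k for k in new_ds_binlb if k not in all_binlb]
all_binlb.update({k: v for k, v in zip(new_lbs, range(len(all_binlb), len(all_binlb) + len(new_lbs)))})
print(all_binlb)  # {'A': 0, 'B': 1, 'C': 2, 'D': 3}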
Example #6
from util import trainer
from data.train_data_provider import ImageDataProvider
from model.unet import Unet
output_path = "/data/Cell/unet/model3/"
data_provider = ImageDataProvider("/data/Cell/unet/*.jpg")

net = Unet(layers=3, features_root=32, channels=3, n_class=2)
trainer = trainer.Trainer(net, optimizer="adam")
path = trainer.train(data_provider, output_path, training_iters=32, epochs=100)