# Beispiel #1 (Example 1) [score: 0]
def main():
    """
    Entry point: validate the environment, assemble the processing options,
    and run the active-fire inputs.

    Returns 0 on success, 2 on a bad installation/environment, and 1 when
    processing raises.
    """

    # Collect the command line options.
    args, work_dir, docleanup, cspp_afire_version, logfile = argument_parser()

    # Verify the mandatory environment variables and directory paths.
    try:
        _, afire_home = check_and_convert_env_var('CSPP_ACTIVE_FIRE_HOME')
        _, afire_ancil_path = check_and_convert_env_var(
            'CSPP_ACTIVE_FIRE_STATIC_DIR')
        _ = check_and_convert_path(
            None,
            os.path.join(afire_home, 'static_ancillary'),
            check_write=False)
        _ = check_and_convert_path(None, work_dir, check_write=False)
    except CsppEnvironment as e:
        LOG.error(e.value)
        LOG.error(
            'Installation error, Make sure all software components were installed.'
        )
        return 2

    abs_work_dir = os.path.abspath(args.work_dir)

    # Bundle everything the processing stage needs into one options dict.
    afire_options = {
        'inputs': args.inputs,
        'afire_home': os.path.abspath(afire_home),
        'i_band': args.i_band,
        'work_dir': abs_work_dir,
        'ancil_dir': afire_ancil_path,
        'cache_dir': setup_cache_dir(args.cache_dir, abs_work_dir,
                                     'CSPP_ACTIVE_FIRE_CACHE_DIR'),
        'ancillary_only': args.ancillary_only,
        'cache_window': args.cache_window,
        'preserve_cache': args.preserve_cache,
        'num_cpu': args.num_cpu,
        'docleanup': docleanup,
        'version': cspp_afire_version,
    }

    rc = 0
    try:
        (attempted_runs, successful_runs,
         crashed_runs, problem_runs) = process_afire_inputs(work_dir,
                                                            afire_options)

        LOG.info('attempted_runs    {}'.format(attempted_runs))
        LOG.info('successful_runs   {}'.format(successful_runs))
        LOG.info('problem_runs      {}'.format(problem_runs))
        LOG.info('crashed_runs      {}'.format(crashed_runs))
    except Exception:
        # Top-level boundary: log the full traceback, report failure via rc.
        LOG.error(traceback.format_exc())
        rc = 1

    return rc
# Beispiel #2 (Example 2) [score: 0]
def main():
    """
    Seed the RNGs, load the dataset splits, train (from scratch or resuming
    from a checkpoint), then evaluate the checkpoint left on disk.
    """
    args = argument_parser().parse_args()
    print(args)

    # Make runs reproducible and pin all CUDA work to device 0.
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.set_device(0)

    train_set, val_set, test_set = read_dataset(DATA_DIR)

    if args.pretrained:
        print('Restoring from checkpoint...')
        (model_state, op_state, meta_iteration,
         cur_meta_step_size, accuracy_tracking) = load_checkpoint(args.checkpoint)
        train(train_set, test_set, args.checkpoint, model_state, op_state,
              **train_kwargs(args))
    else:
        print('Training...')
        train(train_set, test_set, args.checkpoint, **train_kwargs(args))

    # Always evaluate whatever checkpoint training produced.
    print('\nEvaluating...')
    (model_state, op_state, meta_iteration,
     cur_meta_step_size, accuracy_tracking) = load_checkpoint(args.checkpoint)
    do_evaluation(model_state, op_state, args.checkpoint, val_set, test_set,
                  train_set)
# Beispiel #3 (Example 3) [score: 0]
    def load_args(self):
        """
        Populate ``self.args`` from ``self.params``.

        If ``self.params`` is a list it is treated as a raw argv tail and
        parsed with the project's ``argument_parser()``; otherwise it is
        assumed to be a mapping of option names to values and wrapped in a
        ``Namespace`` directly.
        """
        if isinstance(self.params, list):
            # BUG FIX: the original kept only a reference
            # (``temp_argv = sys.argv``), so the slice assignment below
            # mutated the saved list as well and the later "restore" was a
            # no-op. Save a real copy instead.
            saved_argv = list(sys.argv)
            sys.argv[1:] = self.params
            try:
                parser = argument_parser()
                self.args = parser.parse_args()
            finally:
                # Restore argv even if parsing raises (argparse calls
                # sys.exit / raises SystemExit on bad options).
                sys.argv = saved_argv
        else:
            self.args = Namespace(**self.params)

        print(self.args)
# Beispiel #4 (Example 4) [score: 0]
def main():
    """
    Command line entry point: parse options, resolve the dataset file paths
    for the selected task, then train / test / export via TorchProgram.
    """
    args = argument_parser()

    # Map a task name to its data files. Only 'seq2seq' is defined here, so
    # any other args.name raises a KeyError at the lookup below.
    dataset_files = {
        'seq2seq': {
            'train': args.data_dir + '/formatted_movie_lines.txt',
            'dev': args.data_dir + '/formatted_movie_lines.txt',
            # NOTE(review): unlike 'train'/'dev' this path is not prefixed
            # with args.data_dir — confirm whether that is intentional.
            'bak': '/formatted_movie_lines_bak.txt'
        },
    }

    args.dataset_files = dataset_files[args.name]
    # Removed the unused 'save_dirs' dict and the commented-out
    # tgt_size/save_dir assignments (dead code).

    logger = get_logger(args)
    logger.info(args)
    check_gpu(args, logger)

    program = TorchProgram(args, logger)

    if args.train:
        program.train()
        write_config(args, logger)

    # Testing additionally requires a model directory to load from.
    if args.test and args.load_dir:
        program.test_seq2seq(program.vocab)

    if args.export:
        program.export(args.export_path)
from torchreid.data_manager import VideoDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, TripletLoss, DeepSupervision
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, load_pretrained_weights
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.utils.generaltools import set_random_seed
from torchreid.eval_metrics import evaluate
from torchreid.samplers import RandomIdentitySampler
from torchreid.optimizers import init_optimizer
from torchreid.lr_schedulers import init_lr_scheduler

# global variables
# NOTE: parsing happens at import time, so merely importing this module
# consumes sys.argv as a side effect.
parser = argument_parser()
args = parser.parse_args()


def main():
    """Set up seeding, GPU visibility, and stdout logging for a run."""
    # 'args' is the module-level parsed namespace; 'global' here only
    # matters if a later (unseen) part of this function rebinds it.
    global args

    set_random_seed(args.seed)
    # Restrict CUDA to the requested devices unless told to use whatever
    # GPUs happen to be available.
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    if args.use_cpu: use_gpu = False
    # Separate log files for evaluation-only and training runs.
    log_name = 'log_test.txt' if args.evaluate else 'log_train.txt'
    # Redirect stdout so every print() below is also captured in the log.
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print('==========\nArgs:{}\n=========='.format(args))
# Beispiel #6 (Example 6) [score: 0]
"""
Helpers for evaluating models.
"""
import os
import numpy as np
import tqdm
import pandas as pd

from reptile import Reptile
from args import argument_parser, evaluate_kwargs
from util import mean_confidence_interval

# Parse command line options at import time and precompute the keyword
# arguments consumed by evaluate() below.
args = argument_parser().parse_args()
eval_kwargs = evaluate_kwargs(args)


# pylint: disable=R0913,R0914
def evaluate(dataset,
             model_state,
             op_state,
             num_classes=5,
             num_shots=5,
             eval_inner_batch_size=5,
             eval_inner_iters=50,
             num_samples=10000,
             transductive=False,
             cuda=False,
             pin_memory=False,
             foml=False):
    """
    Evaluate a model on a dataset.
# Beispiel #7 (Example 7) [score: 0]
# Load spaCy's English model once at import time; used by tokenizer() below.
spacy_en = spacy.load('en')


def tokenizer(text):
    """Tokenize *text* with spaCy's English tokenizer and return the
    surface strings (``tok.text``) as a list."""
    pieces = []
    for tok in spacy_en.tokenizer(text):
        pieces.append(tok.text)
    return pieces


from torchtext import data

import numpy as np
from data import text_utils

if __name__ == '__main__':
    args = argument_parser()
    # Restore the pickled torchtext TEXT field (vocab etc.) produced by a
    # previous preprocessing run.
    with open("seq2seq/bak/TEXT.Field", "rb") as f:
        TEXT = dill.load(f)

    # Scalar length field: no tokenization, no vocabulary lookup.
    LENGTH = data.Field(sequential=False, use_vocab=False)

    # Random embeddings sized to the restored vocabulary; presumably
    # overwritten by trained weights elsewhere — TODO confirm.
    embeddings = np.random.random((len(TEXT.vocab.itos), args.embed_size))
    args.TEXT = TEXT

    # Build the encoder / attention / decoder from the SN_MODELS registry.
    encoder = SN_MODELS["encoder"](embeddings, args)
    # atten = SN_MODELS["attention"](args.hidden_size * 4, 300)
    #decoder = SN_MODELS["decoder"](embeddings, args)
    atten = SN_MODELS["attention"](args.hidden_size, "general")
    decoder = SN_MODELS["decoder"](embeddings, args, atten)

    model_class = SN_MODELS[args.model_name]