Example No. 1
def cli_main():
    parser = get_convert_parser()
    parser.add_argument(
        '--lang', '-l', default='zh', choices=['zh', 'en'],
        help='language'
    )
    _args = parse_arguments(parser)
    main(_args)
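Most of the examples below rely on a project-specific parse_arguments helper. A minimal sketch of what such a wrapper could look like, assuming it is a thin layer over argparse; the names and signature here are illustrative only, and individual projects differ (Example No. 4, for instance, expects an (args, unknown) pair as returned by parse_known_args):

import argparse
import sys


def parse_arguments(parser=None, argv=None):
    # Hypothetical wrapper: fall back to a bare parser when none is given,
    # then parse the supplied argv (defaulting to sys.argv[1:]).
    if parser is None:
        parser = argparse.ArgumentParser()
    return parser.parse_args(sys.argv[1:] if argv is None else argv)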
Example No. 2
def run():
    v = options.parse_arguments().verbosity

    comm = arb.mpi_comm(mpi.COMM_WORLD)
    alloc = arb.proc_allocation()
    ctx = arb.context(alloc, comm)
    rank = ctx.rank

    if rank == 0:
        # only rank 0 reports results at the requested verbosity
        runner = unittest.TextTestRunner(verbosity=v)
    else:
        # silence the remaining ranks so output is not duplicated
        sys.stdout = open(os.devnull, 'w')
        runner = unittest.TextTestRunner(stream=sys.stdout)

    runner.run(suite())
Example No. 3
def run():
    v = options.parse_arguments().verbosity

    # initialize MPI only if the harness has not already done so
    if not arb.mpi_is_initialized():
        arb.mpi_init()

    comm = arb.mpi_comm()
    alloc = arb.proc_allocation()
    ctx = arb.context(alloc, comm)
    rank = ctx.rank

    if rank == 0:
        runner = unittest.TextTestRunner(verbosity=v)
    else:
        sys.stdout = open(os.devnull, 'w')
        runner = unittest.TextTestRunner(stream=sys.stdout)

    runner.run(suite())

    # finalize MPI only if it is still active
    if not arb.mpi_is_finalized():
        arb.mpi_finalize()
Example No. 4
import sys
import os
import datetime
import torch

import opt
import metrics.segmentation_metrics
import metrics.reasoning_metrics

from models import get_model
from options import parse_arguments
from data import SOR3DLoader, SOR3DLoaderParams
from metrics.segmentation_metrics import F1_Score
from utils import NullVisualizer, VisdomVisualizer, initialize_vgg_weights, initialize_weights, generate_gt_heatmap

if __name__ == '__main__':
    print("{} | Torch Version: {}".format(datetime.datetime.now(),
                                          torch.__version__))
    args, unknown = parse_arguments(sys.argv)
    device = torch.device("cuda:0" if args.cuda else "cpu")
    torch.manual_seed(667)
    if device.type == 'cuda':
        torch.cuda.manual_seed(667)

    # visdom init
    visualizer = NullVisualizer() if args.visdom is None\
        else VisdomVisualizer(args.name, args.visdom, count=4)
    if args.visdom is None:
        args.visdom_iters = 0

    # data
    train_data_params = SOR3DLoaderParams(
        root_path=os.path.join(args.train_path, 'train'))
    train_data_iterator = SOR3DLoader(train_data_params)
Example No. 5
test_modules = [
    test_contexts,
    test_domain_decompositions,
    test_event_generators,
    test_identifiers,
    test_schedules,
    test_cable_probes,
    test_morphology,
]  # add more if needed


def suite():
    loader = unittest.TestLoader()

    suites = []
    for test_module in test_modules:
        test_module_suite = test_module.suite()
        suites.append(test_module_suite)

    suite = unittest.TestSuite(suites)

    return suite


if __name__ == "__main__":
    v = options.parse_arguments().verbosity
    runner = unittest.TextTestRunner(verbosity=v)
    runner.run(suite())
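Example No. 5 assumes that every module in test_modules exposes its own suite() function. A minimal sketch of such a per-module helper, assuming it only needs to collect the TestCase classes defined in that module (illustrative, not taken from the project):

import sys
import unittest


def suite():
    # Hypothetical per-module suite(): gather every TestCase defined in this
    # module so the aggregating TestSuite above can include it.
    loader = unittest.TestLoader()
    return loader.loadTestsFromModule(sys.modules[__name__])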
Example No. 6
def cli_main():
    _parser = get_filter_parser()
    _args = parse_arguments(_parser)
    main(_args)
Example No. 7
        #  The actual training and validation step for each epoch  #
        ############################################################
        train_loss, train_metric = train_model(model, train_loader, epoch,
                                               optimizer, writer, opts)

        with torch.no_grad():
            val_loss, val_metric = evaluate_model(model, val_loader, epoch,
                                                  writer, opts)

            ##############################
            #  Adjust the learning rate  #
            ##############################
            if opts.lr_scheduler == 'plateau':
                scheduler.step(val_loss)
            elif opts.lr_scheduler == 'step':
                scheduler.step()

        t_end = time.time()
        delta = t_end - t_start

        utils.print_epoch_progress(epoch, opts.epochs, train_loss, val_loss,
                                   delta, train_metric, val_metric)

    t_end_training = time.time()
    print('training took {}s'.format(t_end_training - t_start_training))


if __name__ == '__main__':
    opts = parse_arguments()
    main(opts)
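Example No. 7 steps the scheduler differently per branch because PyTorch's ReduceLROnPlateau needs the monitored metric while StepLR is stepped once per epoch. A minimal sketch of how such a scheduler might be built; the opts fields are inferred from the snippet and the hyperparameter values are placeholders, not the project's settings:

import torch


def build_scheduler(optimizer, opts):
    # Hypothetical factory matching the two branches above.
    if opts.lr_scheduler == 'plateau':
        # stepped with scheduler.step(val_loss)
        return torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode='min', patience=5)
    if opts.lr_scheduler == 'step':
        # stepped with scheduler.step() once per epoch
        return torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=10, gamma=0.1)
    return None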
Example No. 8
def parse_bpe_arguments():
    return parse_arguments(get_bpe_parser())
Example No. 9
import numpy as np
import os, sys, time
import torch
import importlib

import options
from util import log

log.process(os.getpid())
log.title("[{}] (evaluate SDF-SRN)".format(sys.argv[0]))

opt_cmd = options.parse_arguments(sys.argv[1:])
opt = options.set(opt_cmd=opt_cmd)

with torch.cuda.device(opt.device):

    model = importlib.import_module("model.{}".format(opt.model))
    m = model.Model(opt)

    m.load_dataset(opt, eval_split="test" if opt.data.dataset == "shapenet"
                   else "val" if opt.data.dataset == "pascal3d" else None)
    m.build_networks(opt)
    m.restore_checkpoint(opt)
    m.setup_visualizer(opt)

    m.evaluate(opt)
Example No. 10
def cli_main():
    main(parse_arguments(get_flen_parser()))
Example No. 11
def parse_shuffle_arguments():
    return parse_arguments(get_shuffle_parser())
Example No. 12
def parse_stat_arguments():
    parser = get_csgm_parser()
    return parse_arguments(parser)
Example No. 13
            print(
                ('Early stopping after {0} iterations without a decrease ' +
                 'in the validation loss').format(iteration_change_loss))
            break

    t_end_training = time.time()
    print(f'training took {t_end_training - t_start_training}s')
    print(f'Best validation accuracy: {best_val_accu}')
    print(f'Best validation loss: {best_val_loss}')
    print(f'Best validation precision: {best_val_prec}')
    print(f'Best validation recall: {best_val_rec}')
    print(f'Best validation f1: {best_val_f1}')
    print(f'Best validation AUC: {best_val_auc}')

    with torch.no_grad():
        if opts.train_mode in ['combined', 'oversampling']:
            model.load_state_dict(
                torch.load(
                    os.path.join(model_dir, opts.run_name,
                                 'best_state_dict.pth')))
        test_loss, test_metric = evaluate_model(model, test_loader, opts)

    print(f'The best test F1: {test_metric["f1"]}')
    print(f'The best test auc: {test_metric["auc"]}')
    print(f'The best test accuracy: {test_metric["accuracy"]}')


if __name__ == "__main__":
    opts = options.parse_arguments()
    main(opts)
Example No. 14
def parse_dup_arguments():
    return parse_arguments(get_dup_parser())
Example No. 15
def parse_mapping_arguments():
    return parse_arguments(get_mapping_parser())
Example No. 16
def __main():
    # change to top dir
    dir_run_from = os.getcwd()
    top_dir = os.path.dirname(sys.argv[0])
    if top_dir and top_dir != dir_run_from:
        os.chdir(top_dir)

    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)
    hist_log = HistLog(os.path.join(LOG_DIR, RUN_LOG),
                       os.path.join(LOG_DIR, SEARCH_LOG))
    stats_log = StatsJsonLog(os.path.join(LOG_DIR, STATS_LOG))

    try:
        from src import config
    except ImportError:
        print("\nFailed to import configuration file")
        _log_hist_log(hist_log)
        raise

    args = parse_arguments()
    # browser cookies
    cookies = args.cookies

    # microsoft email/pw
    if args.email and args.password:
        email = args.email
        password = args.password
        cookies = False
    else:
        email = __decode(config.credentials['email'])
        password = __decode(config.credentials['password'])

    # telegram credentials
    telegram_messenger = get_telegram_messenger(config, args)

    rewards = Rewards(email, password, DEBUG, args.headless, cookies,
                      args.driver)
    completion = hist_log.get_completion()
    search_hist = hist_log.get_search_hist()
    search_type = args.search_type

    try:
        complete_search(rewards, completion, search_type, search_hist)

        if hasattr(rewards, 'stats'):
            stats_log.write(rewards.stats, email)
            if telegram_messenger:
                telegram_messenger.send_reward_message(rewards.stats, email)

        hist_log.write(rewards.completion, rewards.search_hist)
        completion = hist_log.get_completion()

        # check again, log if any failed
        if not completion.is_search_type_completed(search_type):
            logging.basicConfig(level=logging.DEBUG,
                                format='%(message)s',
                                filename=os.path.join(LOG_DIR, ERROR_LOG))
            logging.debug(hist_log.get_timestamp())
            for line in rewards.stdout:
                logging.debug(line)
            logging.debug("")

    except:  # catch *all* exceptions
        _log_hist_log(hist_log)
        hist_log.write(rewards.completion, rewards.search_hist)
        if telegram_messenger:
            # send error msg to telegram
            import traceback
            error_msg = traceback.format_exc()
            telegram_messenger.send_message(error_msg)
        raise
Example No. 17
def __main():
    args = parse_arguments()
    # change to top dir
    dir_run_from = os.getcwd()
    top_dir = os.path.dirname(sys.argv[0])
    if top_dir and top_dir != dir_run_from:
        os.chdir(top_dir)

    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)
    hist_log = HistLog(
        os.path.join(LOG_DIR, RUN_LOG), os.path.join(LOG_DIR, SEARCH_LOG)
    )

    # browser cookies
    cookies = args.cookies

    # get credentials
    if args.email and args.password:
        email = args.email
        password = args.password
        cookies = False
    else:
        try:
            from src import config
        except ImportError:
            print("\nFailed to import configuration file")
            logging.basicConfig(
                level=logging.DEBUG,
                format='%(message)s',
                filename=os.path.join(LOG_DIR, ERROR_LOG)
            )
            logging.exception(hist_log.get_timestamp())
            logging.debug("")
            raise
        email = __decode(config.credentials['email'])
        password = __decode(config.credentials['password'])

    if not os.path.exists(DRIVERS_DIR):
        os.mkdir(DRIVERS_DIR)
    rewards = Rewards(
        os.path.join(DRIVERS_DIR, DRIVER), email, password, DEBUG, args.headless, cookies
    )
    completion = hist_log.get_completion()

    try:
        if args.search_type == 'remaining':
            print("\n\t{}\n".format("You selected remaining"))

            if not completion.is_all_completed():
                # complete_all() is the fastest path: it does not open a new
                # webdriver for each search type, so retrying an already
                # completed type adds very little overhead.
                if (not completion.is_web_search_completed()
                        and not completion.is_mobile_search_completed()):
                    rewards.complete_all(hist_log.get_search_hist())
                # higher overhead: opens a new webdriver for each unfinished search type
                else:
                    if not completion.is_edge_search_completed():
                        rewards.complete_edge_search(hist_log.get_search_hist())
                    if not completion.is_web_search_completed():
                        rewards.complete_web_search(hist_log.get_search_hist())
                    if not completion.is_offers_completed():
                        rewards.complete_offers()
                    if not completion.is_mobile_search_completed():
                        rewards.complete_mobile_search(
                            hist_log.get_search_hist()
                        )

                hist_log.write(rewards.completion, rewards.search_hist)
                completion = hist_log.get_completion()
                # check again, log if any failed
                if not completion.is_all_completed():
                    logging.basicConfig(
                        level=logging.DEBUG,
                        format='%(message)s',
                        filename=os.path.join(LOG_DIR, ERROR_LOG)
                    )
                    logging.debug(hist_log.get_timestamp())
                    for line in rewards.stdout:
                        logging.debug(line)
                    logging.debug("")

            else:
                print("Nothing remaining")
        elif args.search_type == 'web':
            print("\n\t{}\n".format("You selected web search"))
            if not completion.is_edge_and_web_search_completed():
                if not completion.is_edge_search_completed():
                    rewards.complete_edge_search(hist_log.get_search_hist())
                if not completion.is_web_search_completed():
                    rewards.complete_web_search(hist_log.get_search_hist())
                hist_log.write(rewards.completion, rewards.search_hist)
            else:
                print('Web search already completed')
        elif args.search_type == 'mobile':
            print("\n\t{}\n".format("You selected mobile search"))
            if not completion.is_edge_and_mobile_search_completed():
                if not completion.is_edge_search_completed():
                    rewards.complete_edge_search(hist_log.get_search_hist())
                if not completion.is_mobile_search_completed():
                    rewards.complete_mobile_search(hist_log.get_search_hist())
                hist_log.write(rewards.completion, rewards.search_hist)
            else:
                print('Mobile search already completed')
        elif args.search_type == 'both':
            print(
                "\n\t{}\n".format("You selected both searches (web & mobile)")
            )
            if not completion.is_both_searches_completed():
                rewards.complete_both_searches(hist_log.get_search_hist())
                hist_log.write(rewards.completion, rewards.search_hist)
            else:
                print('Both searches already completed')
        elif args.search_type == 'offers':
            print("\n\t{}\n".format("You selected offers"))
            if not completion.is_offers_completed():
                rewards.complete_offers()
                hist_log.write(rewards.completion, rewards.search_hist)
            else:
                print('Offers already completed')
        elif args.search_type == 'all':
            print("\n\t{}\n".format("You selected all"))
            if not completion.is_all_completed():
                rewards.complete_all(hist_log.get_search_hist())
                hist_log.write(rewards.completion, rewards.search_hist)
            else:
                print('All already completed')

    except:  # catch *all* exceptions
        logging.basicConfig(
            level=logging.DEBUG,
            format='%(message)s',
            filename=os.path.join(LOG_DIR, ERROR_LOG)
        )
        logging.exception(hist_log.get_timestamp())
        logging.debug("")

        hist_log.write(rewards.completion, rewards.search_hist)
        raise
Example No. 18
def run():
    v = options.parse_arguments().verbosity
    runner = unittest.TextTestRunner(verbosity=v)
    runner.run(suite())
Example No. 19
def parse_stat_arguments():
    return parse_arguments(get_stat_parser())
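Several of the one-line wrappers (Examples No. 6, 8, 10-12, 14, 15 and 19) rely on a get_*_parser factory. A minimal sketch of such a factory, assuming it only builds and returns an argparse parser for one sub-command; the option names below are illustrative, not the projects' actual flags:

import argparse


def get_stat_parser():
    # Hypothetical factory: each sub-command builds its own parser and leaves
    # the parsing itself to the shared parse_arguments helper.
    parser = argparse.ArgumentParser(description='compute corpus statistics')
    parser.add_argument('--input', '-i', default='-',
                        help='input file (default: stdin)')
    return parser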