Example #1
def main():
    args = get_parser()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    args.device = torch.device("cuda:0")
    torch.cuda.manual_seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    args.model_path = os.path.join(args.save_path, args.model_name, 'model')
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    if args.model_name == 'generator':
        g = Generator(args)
        if args.test:
            args.model_path = os.path.join(args.save_path, 'predictor',
                                           'model')
            hs = args.hs
            args.hs = 512
            p = Predictor(args)
            args.model_path = os.path.join(args.save_path, args.model_name,
                                           'model')
            args.hs = hs
            g.meta_test(p)
        else:
            g.meta_train()
    elif args.model_name == 'predictor':
        p = Predictor(args)
        p.meta_train()
    else:
        raise ValueError('You should select generator|predictor|train_arch')
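
These examples call a project-specific get_parser() helper rather than argparse directly. As a point of reference, here is a minimal sketch of the kind of helper Examples 1 and 3 appear to assume; every flag name and default below is a guess inferred from the attributes the snippets read (args.gpu, args.seed, args.save_path, args.model_name, args.hs), not the projects' actual interface.

import argparse


def get_parser():
    # Hypothetical sketch: returns parsed args directly, matching the
    # 'args = get_parser()' call sites in Examples 1 and 3.
    parser = argparse.ArgumentParser(description='meta-NAS experiment runner')
    parser.add_argument('--gpu', type=str, default='0',
                        help="comma-separated CUDA device ids, or 'all'")
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--save-path', dest='save_path', default='results')
    parser.add_argument('--data-path', dest='data_path', default='data')
    parser.add_argument('--model-name', dest='model_name',
                        choices=['generator', 'predictor'])
    parser.add_argument('--hs', type=int, default=56, help='hidden size')
    parser.add_argument('--test', action='store_true')
    return parser.parse_args()
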
Example #2
def roll(dice_spec: str) -> Tuple[int, Sequence[int]]:
    tracer = execution_context.get_opencensus_tracer()
    try:
        with tracer.span('initial_parse'):
            tracer.add_attribute_to_current_span("dice_spec", dice_spec)
            tree = get_parser().parse(dice_spec)
    except LarkError as e:
        raise RecognitionError(
            "Sorry, I couldn't understand your request") from e
    logging.debug("Initial parse tree:\n%s", pprint(tree))
    try:
        tree = NumberTransformer().transform(tree)
        with tracer.span('dnd_knowledge'):
            tree = DnD5eKnowledge().transform(tree)
        tree = SimplifyTransformer().transform(tree)
        with tracer.span('crit_transform'):
            tree = CritTransformer().transform(tree)
        logging.debug("DnD transformed parse tree:\n%s", pprint(tree))
        with tracer.span('final_eval'):
            transformer = EvalDice()
            tree = transformer.transform(tree)
        tree = SimplifyTransformer().transform(tree)
    except VisitError as e:
        #  Get our nice exception out of lark's wrapper
        raise e.orig_exc
    return (tree.children[0], transformer.dice_results)
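
Example 2 assumes get_parser() returns a Lark parser for a dice-roll grammar. Below is a self-contained sketch of such a parser; the grammar is an illustrative assumption, not the project's actual grammar, and only the caching and entry-point shape mirror the call sites above.

import functools

from lark import Lark


@functools.lru_cache(maxsize=1)
def get_parser():
    # Hypothetical grammar: sums of dice terms like '2d6 + 3'.
    grammar = r'''
        ?sum: term (("+" | "-") term)*
        term: INT "d" INT   -> dice
            | INT           -> number
        %import common.INT
        %import common.WS
        %ignore WS
    '''
    return Lark(grammar, start='sum')


print(get_parser().parse('2d6 + 3').pretty())
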
Example #3
def main():
	args = get_parser()
	
	if args.gpu == 'all':
		device_list = range(torch.cuda.device_count())
		args.gpu = ','.join(str(_) for _ in device_list)
	else:
		device_list = [int(_) for _ in args.gpu.split(',')]
	os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
	args.device = torch.device("cuda:0")
	args.batch_size = args.batch_size * max(len(device_list), 1)

	torch.cuda.manual_seed(args.seed)
	torch.manual_seed(args.seed)
	np.random.seed(args.seed)
	random.seed(args.seed)
	
	args.model_path = os.path.join(args.save_path, args.model_name, 'model')
	
	if args.model_name == 'generator':
		graph_config = load_graph_config(
			args.graph_data_name, args.nvt, args.data_path)
		model = PredictorModel(args, graph_config)
		d = DatabaseOFA(args, model)
	else:
		d = DatabaseOFA(args)
		
	if args.collect:
		d.collect_db()
	else:
		assert args.index is not None
		assert args.imgnet is not None
		d.make_db()
Example #4
def main():
    '''
    Initialize everything and train
    '''
    options = get_parser().parse_args()
    if not os.path.exists(options.experiment_root):
        os.makedirs(options.experiment_root)

    if torch.cuda.is_available() and not options.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    init_seed(options)
    tr_dataloader, val_dataloader, trainval_dataloader, test_dataloader = init_dataset(
        options)
    model = init_protonet(options)
    optim = init_optim(options, model)
    lr_scheduler = init_lr_scheduler(options, optim)
    res = train(opt=options,
                tr_dataloader=tr_dataloader,
                val_dataloader=val_dataloader,
                model=model,
                optim=optim,
                lr_scheduler=lr_scheduler)
    best_state, best_acc, train_loss, train_acc, val_loss, val_acc = res
    print('Testing with last model..')
    test(opt=options,
         test_dataloader=test_dataloader,
         model=model)

    model.load_state_dict(best_state)
    print('Testing with best model..')
    test(opt=options,
         test_dataloader=test_dataloader,
         model=model)
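
Several of these snippets (Examples 4, 5, 8, 15, and 18) call an init_seed(options) helper before building dataloaders. A plausible minimal implementation is sketched below; the attribute names manual_seed and cuda are assumptions based on typical prototypical-network training scripts, not confirmed by the excerpts.

import random

import numpy as np
import torch


def init_seed(options):
    # Hypothetical helper: seed every RNG the training loop touches so
    # runs are repeatable.
    random.seed(options.manual_seed)
    np.random.seed(options.manual_seed)
    torch.manual_seed(options.manual_seed)
    if options.cuda:
        torch.cuda.manual_seed_all(options.manual_seed)
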
Example #5
def main():
    options = get_parser().parse_args()

    if torch.cuda.is_available() and not options.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

    init_experiment(options)
    init_seed(options)

    healthy_dataloader, anomaly_dataloader = init_dataset(options)

    net_g, net_d = init_model(options)

    net_g.apply(weights_init)
    net_d.apply(weights_init)

    optim_g, optim_d = init_optimizer(options, net_g=net_g, net_d=net_d)

    if options.cuda:
        net_g = net_g.cuda()
        net_d = net_d.cuda()

    train(options,
          healthy_dataloader,
          anomaly_dataloader,
          net_g=net_g,
          net_d=net_d,
          optim_g=optim_g,
          optim_d=optim_d)
Example #6
def download(url):
    print('open connection')

    try:
        _parser = parser.get_parser(url)
    except exceptions.NotBoorusPrefixError as e:
        logger.exception("invalid prefix in {}".format(e.url))
        return
    except exceptions.SiteNotSupported as e:
        logger.exception("Site not supported {}".format(e.url))
        return
    try:
        data = _parser.get_data()
    except IndexError:
        return

    parsed_tags = _parser.tagIndex()
    print("parsed tags", parsed_tags)
    outdir = tagResponse.find_folder(parsed_tags)
    print("outdir", outdir)

    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    _parser.save_image_old_interface(outdir, data, parsed_tags)
Example #7
def main():
    parser = get_parser()
    args = parser.parse_args()
    print(args)
    if "preprocess" in args.operation_mode:
        print("prep")
        dataset = WavFilesDataset(args.input)
        dataset.save_as_hd5(args.h5_path)
        splitter = Splitter(args)
        #TODO
    
    if "train" in args.operation_mode:
        print("train")

        if args.world_size > 1:
            torch.cuda.set_device(args.rank % torch.cuda.device_count())
            distributed.init_process_group(backend=args.dist_backend,
                                           init_method="tcp://" + args.master,
                                           rank=args.rank,
                                           world_size=args.world_size)

        if args.train_step == 1:
            print("1")
            AutoencoderTrainer(args).train()     
        else:
            print("2")
            MusicStarTrainer(args).train()

    if "translate" in args.operation_mode:
        print("translate")
        #TODO

    if "analyze" in args.operation_mode:
        print("analyze")  
Example #8
def main():
    options = get_parser().parse_args()

    if not os.path.exists(os.path.join(options.model_out,
                                       options.dataset_type)):
        os.makedirs(os.path.join(options.model_out, options.dataset_type))

    if torch.cuda.is_available() and not options.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

    init_seed(options)
    tr_dataloader = init_dataloader(options, "train")
    val_dataloader = init_dataloader(options, "val")
    test_dataloader = init_dataloader(options, "test")
    model = init_model()
    optim, scheduler = init_optim_scheduler(options, model)
    best_state, best_acc, train_loss, train_acc, val_loss, val_acc = train(
        options, tr_dataloader, model, optim, scheduler, val_dataloader)

    print('Testing with last model..')
    test(options, test_dataloader, model)

    print('Testing with best model..')
    model.load_state_dict(best_state)
    test(options, test_dataloader, model)
Example #9
 def _parse_expr(self, e, for_obj):
     from interpreter.builder import Builder
     from parser import get_parser
     pars = get_parser("%s;" % e)
     e = Builder().create_expression(pars.expression().tree, for_obj)
     e = self.type.normalize(e)
     self.type.validate(e)
     return e
Example #11
def parse_program(input: str,
                  forbid_rebuild: bool = False,
                  filename: Optional[str] = None) -> Program:
    l = parser.get_lexer()
    p = parser.get_parser(forbid_rebuild=forbid_rebuild)
    prog: Program = p.parse(input=input, lexer=l, filename=filename)
    prog.input = input
    return prog
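
Example 11's forbid_rebuild flag and separate get_lexer()/get_parser() calls suggest a PLY-style toolchain. The sketch below is a tiny but complete PLY lexer/parser pair showing that shape; the grammar (integer sums) is purely illustrative, and mapping forbid_rebuild onto PLY's table options is an assumption.

import ply.lex
import ply.yacc

tokens = ('NUMBER', 'PLUS')

t_PLUS = r'\+'
t_ignore = ' \t'


def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t


def t_error(t):
    raise SyntaxError('illegal character %r' % t.value[0])


def p_expr_plus(p):
    'expr : expr PLUS NUMBER'
    p[0] = p[1] + p[3]


def p_expr_number(p):
    'expr : NUMBER'
    p[0] = p[1]


def p_error(p):
    raise SyntaxError('syntax error at %r' % (p,))


def get_lexer():
    return ply.lex.lex()


def get_parser(forbid_rebuild=False):
    # write_tables=False keeps the generated LALR tables in memory, so no
    # table files are (re)built on disk; the real project presumably uses
    # forbid_rebuild to error out when its cached tables are stale.
    return ply.yacc.yacc(debug=False, write_tables=False)


print(get_parser().parse('1 + 2 + 3', lexer=get_lexer()))  # -> 6
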
Example #12
def main():
    # loads a lot of default parser values from the 'parser' file
    parser = file_parser.get_parser()

    # get args from parser as an object
    # MOST IMPORTANT PART, IN TERMS OF CONTROLLING THE EXPERIMENTS
    args = parser.parse_args()

    # initialize seeds
    misc_utils.init_seed(args.seed)

    # set up loader
    # 2 options: class_incremental and task_incremental
    # experiments in the paper only use task_incremental
    Loader = importlib.import_module('dataloaders.' + args.loader)

    # args.loader='task_incremental_loader'
    # print('loader stuff', args)
    loader = Loader.IncrementalLoader(args, seed=args.seed)
    # print('loader stuff after after', args)
    n_inputs, n_outputs, n_tasks = loader.get_dataset_info()

    # setup logging
    # logging is from 'misc_utils.py' from 'utils' folder
    timestamp = misc_utils.get_date_time()  # redundant, since log_dir already takes care of it
    args.log_dir, args.tf_dir = misc_utils.log_dir(
        args, timestamp)  # stores args into "training_parameters.json"

    # load model from the 'model' folder
    Model = importlib.import_module('model.' + args.model)
    # create the model neural net
    model = Model.Net(n_inputs, n_outputs, n_tasks, args)
    # make model cuda-ized if possible
    if args.cuda:
        try:
            model.net.cuda()
        except Exception:
            pass

    # wandb.watch(model)

    # run model on loader
    if args.model == "iid2":
        # oracle baseline with all task data shown at same time
        result_val_t, result_val_a, result_test_t, result_test_a, spent_time = life_experience_iid(
            model, loader, args)
    else:
        # for all the CL baselines
        result_val_t, result_val_a, result_test_t, result_test_a, spent_time = life_experience(
            model, loader, args)

        # save results in files or print on terminal
        save_results(args, result_val_t, result_val_a, result_test_t,
                     result_test_a, model, spent_time)
Example #13
 def WEAPON(self, name) -> Tree:
     tracer = execution_context.get_opencensus_tracer()
     weapon = self.find_named_object(name, WEAPONS)
     dice_spec = weapon["damage_dice"]
     with tracer.span('parse_weapon'):
         tracer.add_attribute_to_current_span("name", name)
         tracer.add_attribute_to_current_span("dice_spec", dice_spec)
         tree = get_parser().parse(dice_spec, start="sum")
     logging.debug("weapon %s has damage dice %s parsed as:\n%s", name,
                   dice_spec, pprint(tree))
     return tree
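
Examples 2, 13, 14, and 27 fetch a tracer from OpenCensus' execution context. How that tracer gets there is outside the excerpts; below is a minimal setup sketch using the public opencensus API (the span name and attribute values are illustrative).

from opencensus.trace import execution_context
from opencensus.trace.samplers import AlwaysOnSampler
from opencensus.trace.tracer import Tracer

# Constructing a Tracer registers it in the execution context, which is
# what lets the snippets above retrieve it with get_opencensus_tracer().
tracer = Tracer(sampler=AlwaysOnSampler())

with tracer.span('demo'):
    current = execution_context.get_opencensus_tracer()
    current.add_attribute_to_current_span('dice_spec', '2d6')
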
Example #14
 def spell_default(self, spell: Mapping[str, Any]) -> Tree:
     tracer = execution_context.get_opencensus_tracer()
     m = re.search(r"\d+d\d+( \+ \d+)?", spell["desc"])
     if not m:
         raise ImpossibleSpellError(
             f"Sorry, I couldn't find the damage dice for {spell['name']}")
     with tracer.span('parse_spell'):
         tracer.add_attribute_to_current_span("name", spell["name"])
         tracer.add_attribute_to_current_span("dice_spec", m.group(0))
         tree = get_parser().parse(m.group(0), start="sum")
     logging.debug("spell %s has base damage dice %s parsed as:\n%s",
                   spell["name"], m.group(0), pprint(tree))
     return tree
Example #15
def main():
    '''
    Initialize everything and train
    '''
    options = get_parser().parse_args()

    if torch.cuda.is_available() and not options.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    init_seed(options)
    tr_dataloader, val_dataloader, trainval_dataloader, test_dataloader = init_dataset(
        options)
    model = init_protonet(options)
    optim = init_optim(options, model)
    lr_scheduler = init_lr_scheduler(options, optim)
    best_state = train(opt=options,
                       tr_dataloader=tr_dataloader,
                       val_dataloader=val_dataloader,
                       model=model,
                       optim=optim,
                       lr_scheduler=lr_scheduler)
    print('Testing with last model..')
    test(opt=options,
         test_dataloader=test_dataloader,
         model=model)

    model.load_state_dict(best_state)
    print('Testing with best model..')
    test(opt=options,
         test_dataloader=test_dataloader,
         model=model)

    # optim = init_optim(options, model)
    # lr_scheduler = init_lr_scheduler(options, optim)

    print('Training on train+val set..')
    train(opt=options,
          tr_dataloader=trainval_dataloader,
          val_dataloader=None,
          model=model,
          optim=optim,
          lr_scheduler=lr_scheduler)

    print('Testing final model..')
    test(opt=options,
         test_dataloader=test_dataloader,
         model=model)
Example #16
def eval(opt):
    '''
    Initialize everything and evaluate
    '''
    options = get_parser().parse_args()

    if torch.cuda.is_available() and not options.cuda:
        print(
            "WARNING: You have a CUDA device, so you should probably run with --cuda"
        )

    init_seed(options)
    test_dataloader = init_dataset(options)[-1]
    model = init_protonet(options)
    model_path = os.path.join(opt.experiment_root, 'best_model.pth')
    model.load_state_dict(torch.load(model_path))

    test(opt=options, test_dataloader=test_dataloader, model=model)
Example #17
def get_args():
    """
    Preprocess dataset

    usage: preprocess.py [options] <wav-dir>

    options:
        --output-dir=<dir>      Directory where processed outputs are saved. [default: data_dir].
        -h, --help              Show help message.
    """
    parser = get_parser(description="Preprocess dataset")
    parser.add_argument('wav_dir', type=str)
    parser.add_argument('--output-dir', type=str,
                        help='directory where processed outputs are saved',
                        default='data_dir')
    parser.add_argument('--start-id', type=int, help='preprocessing checkpoint',
                        default=0)
    #parser.add_argument('--dsp_config', type=str, help='dsp default config file',
    #                    default='./config/dsp.yaml')
    return parser.parse_args()
Example #18
def main():
    '''
    Initialize everything and train
    '''
    options = get_parser().parse_args()

    if torch.cuda.is_available() and not options.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    init_seed(options)
    tr_dataloader, val_dataloader = init_dataset(options)
    model = init_model(options)
    optim = init_optim(options, model)
    lr_scheduler = init_lr_scheduler(options, optim)
    train(opt=options,
          tr_dataloader=tr_dataloader,
          val_dataloader=val_dataloader,
          model=model,
          optim=optim,
          lr_scheduler=lr_scheduler)
Example #19
    def __init__(self, predicate, debug=0):
        """
        Initializes the Predicate object with the string predicate.

        Arguments:
            predicate : String predicate
            debug : optional, defaults to 0. Controls the debug behavior
             of the underlying parser and lexer.

        Returns:
            Predicate object
        """
        # Validate the predicate
        if not isinstance(predicate, str):
            raise TypeError("Predicate must be a string!")

        # Initialize the literal resolver
        LiteralResolver.__init__(self)

        # Store the predicate
        self.predicate = predicate

        # Setup the lexer
        lexer = get_lexer()
        self.lexer_errors = lexer.errors

        # Setup the parser
        p = get_parser(lexer=lexer, debug=debug)
        self.parser_errors = p.errors

        # Try to get the AST tree
        try:
            self.ast = p.parse(self.predicate, lexer=lexer)
            self.ast_validated = False
            self.ast_valid = False
            self.ast_errors = None
        except Exception, e:
            self.ast = None
            self.ast_validated = True
            self.ast_valid = False
            self.ast_errors = {"errors": [str(e)], "regex": {}}
Example #21
def command_line_runner():
	"""Consumes commands to trichome."""
	command_parser = parser.get_parser()
	args = vars(command_parser.parse_args())

	if args['discover'] == 'discover':
		common_words = []
		if args['common_words']:
			common_words = read_file(args['common_words'][0])

		result = discover(args['URL'][-1], common_words)
	else:
		vector_input = None
		sensitive_input = None
		if args['vectors']:
			vector_input = read_file(args['vectors'][0])
			
		if args['sensitive']:
			sensitive_input = read_file(args['sensitive'][0])			

		tested = test(args['URL'], vector_input, sensitive_input, args['random'], args['slow'])
Example #22
    def download(self):
        self._dl_btn['state'] = DISABLED
        pipe = multiprocessing.Pipe()
        try:
            while self._list.size() > 0:
                id_list = self._list.get(0, tkinter.END)
                self._list.delete(0, tkinter.END)

                map_list = list()
                for raw_id in id_list:
                    _parser = None
                    try:
                        _parser = parser.get_parser(raw_id)
                    except exceptions.NotBoorusPrefixError as e:
                        logger.exception("invalid prefix in {}".format(e.url))
                        continue
                    except exceptions.SiteNotSupported as e:
                        logger.exception("Site not supported {}".format(e.url))
                        continue
                    try:
                        data = _parser.get_data()
                    except IndexError:
                        continue
                    try:
                        parsed_tags = _parser.tagIndex()
                    except KeyError:
                        continue
                    outdir = tagResponse.find_folder(parsed_tags)
                    if not os.path.isdir(outdir):
                        os.makedirs(outdir)
                    map_list.append((_parser, outdir, data, parsed_tags))
                dl_pool = multiprocessing.Pool(processes=config.workers)
                results = dl_pool.map(parser.save_call, map_list, chunksize=1)
                pyimglib.transcoding.statistics.update_stats(results)
        except Exception as e:
            self._add_btn['state'] = DISABLED
            messagebox.showerror(e.__class__.__name__, str(e))
            raise e
        self._current_item = None
        self._dl_btn['state'] = NORMAL
Example #23
File: main.py Project: yyht/La-MAML
def main():
    parser = file_parser.get_parser()

    args = parser.parse_args()

    # initialize seeds
    misc_utils.init_seed(args.seed)

    # set up loader
    # 2 options: class_incremental and task_incremental
    # experiments in the paper only use task_incremental
    Loader = importlib.import_module('dataloaders.' + args.loader)
    loader = Loader.IncrementalLoader(args, seed=args.seed)
    n_inputs, n_outputs, n_tasks = loader.get_dataset_info()

    # setup logging
    timestamp = misc_utils.get_date_time()
    args.log_dir, args.tf_dir = misc_utils.log_dir(args, timestamp)

    # load model
    Model = importlib.import_module('model.' + args.model)
    model = Model.Net(n_inputs, n_outputs, n_tasks, args)
    if args.cuda:
        try:
            model.net.cuda()
        except Exception:
            pass

    # run model on loader
    if args.model == "iid2":
        # oracle baseline with all task data shown at same time
        result_val_t, result_val_a, result_test_t, result_test_a, spent_time = life_experience_iid(
            model, loader, args)
    else:
        # for all the CL baselines
        result_val_t, result_val_a, result_test_t, result_test_a, spent_time = life_experience(
            model, loader, args)

        # save results in files or print on terminal
        save_results(args, result_val_t, result_val_a, result_test_t, result_test_a, model, spent_time)
Example #24
def get_args():
    """Training WaveRNN Model.

    usage: train.py [options] <data-root>

    options:
        --checkpoint-dir=<dir>      Directory where to save model checkpoints [default: checkpoints].
        --checkpoint=<path>         Restore model from checkpoint path if given.
        -h, --help                  Show this help message and exit
    """
    parser = get_parser(description="Training WaveRNN Model")
    parser.add_argument('data_root', type=str, help='Preprocessed data dir.')
    parser.add_argument('--checkpoint-dir',
                        type=str,
                        default='checkpoints',
                        help='Directory where to save model checkpoints')
    parser.add_argument('--checkpoint',
                        type=str,
                        default=None,
                        help='Restore model from checkpoint path if given.')

    return parser.parse_args()
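
Examples 17 and 24 both start from get_parser(description=...) and then add their own arguments, which points at a small shared factory. A plausible sketch (the formatter choice is an assumption):

import argparse


def get_parser(description=None):
    # Hypothetical shared helper used by preprocess.py and train.py.
    return argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
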
Example #25
def main():

    val, data = get_parser()
    print(val, data)
    if val == 'qt5':
        gs_text, series, s3_text, output_path, email, local_files_only, s3_files_only = read_qt5_input(
            data)
    elif val == 'arg':
        split_num, process_num, gs_text, series, s3_text, output_path, email, local_files_only, s3_files_only = read_args_input(
            data)
    print(local_files_only, s3_files_only)
    py3 = sys.version_info[0] > 2  # True when the Python major version is > 2

    try:
        os.mkdir(output_path)
        response = 'N'
    except OSError:
        if py3:
            response = input(
                "Directory already exists. Do you still want to continue (Y/N): "
            )
        else:
            response = raw_input(
                "Directory already exists. Do you still want to continue (Y/N): "
            )
        if response != 'Y':
            sys.exit(
                "Canceled, please rerun with different Directory or path.")
    if gs_text != '':
        if s3_files_only or local_files_only:
            make_manifest(gs_text, series, s3_text, output_path, email,
                          response, local_files_only, s3_files_only)
        else:
            sra_series_fetch(split_num, process_num, gs_text, series, s3_text,
                             output_path, email, response, local_files_only,
                             s3_files_only)
Example #26
def main():
    args = parser.get_parser().parse_args()
    if args.epochs is None:
        args.epochs = 90

    start_epoch = 0
    ending_epoch = args.epochs

    # TODO: set device

    # Create the model
    model_helper = models.create_model_helper(args.framework, args.dataset, args.arch)

    compression_scheduler = None

    if args.deprecated_resume:
        print('The "--resume" flag is deprecated. Please use "--resume-from=YOUR_PATH" instead.')
        if not args.reset_optimizer:
            print('If you wish to also reset the optimizer, call with: --reset-optimizer')
            args.reset_optimizer = True
        args.resumed_checkpoint_path = args.deprecated_resume

    if args.compress:
Example #27
 def spell(self, spell: Mapping[str, Any], level: int) -> Tree:
     tracer = execution_context.get_opencensus_tracer()
     spell_tree = self.spell_default(spell)
     if level < spell["level_int"]:
         raise ImpossibleSpellError(
             "Sorry, %s is level %d, so I can't cast it at level %d" %
             (spell["name"], spell["level_int"], level))
     m = re.search(r"\d+d\d+( \+ \d+)?", spell["higher_level"])
     if not m:
         raise ImpossibleSpellError(
             "Sorry, I couldn't determine the additional damage dice for %s"
             % spell["name"])
     with tracer.span('parse_spell_additional'):
         tracer.add_attribute_to_current_span("name", spell["name"])
         tracer.add_attribute_to_current_span("dice_spec", m.group(0))
         higher_level_tree = get_parser().parse(m.group(0), start="sum")
     logging.debug(
         "spell %s has damage dice %s per extra level parsed as:\n%s",
         spell["name"], m.group(0), pprint(higher_level_tree))
     for _ in range(level - spell["level_int"]):
         spell_tree = Tree('add', [spell_tree, higher_level_tree])
     logging.debug("spell %s complete tree parsed as:\n%s", spell["name"],
                   pprint(spell_tree))
     return spell_tree
Example #28
def main(argv=[]):
  args = parser.get_parser()

  verbose = 1
  if args.verbose:
    verbose = 2
  if args.debug:
    verbose = 3

  if verbose>2:
    log_level=logging.DEBUG
  elif verbose==2:
    log_level=logging.INFO
  elif verbose==1:
    log_level=logging.WARNING
  elif verbose<1:
    log_level=logging.ERROR

  logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s: %(message)s")

  proxy = args.proxy
  proxy_host = args.proxy_host
  proxy_port = args.proxy_port
  proxy_type = args.proxy_type

  config.first_run()

  if args.command == 'version':
    print torsurvey.get_version()
    return True

  elif args.command == 'update':
    return updater.check_update()

  ac = torapi.TorAPI(proxy_host=proxy_host, proxy_port=proxy_port, proxy_type=proxy_type, timeout=args.timeout)
  dbi = db.DbManager(args.dbpath)
  dbi.init()
  tc = controller.TorController(ac, dbi)
  # cx = CexMethods(ac, dbi)

  if args.command == 'checkip':
    print "Checking IP address"
    print ac.get_ip()
    return True

  elif args.command == 'flushdb':
    dbi.flush()
    print "Cleared db"

  elif args.command == 'fetch':
    if not args.url:
      logging.error("URL to fetch from required")
      return False
    print "Fetching from %s " % args.url
    f = tc.fetch_sitelist(args.url, args.cache, (not args.noinsert))
    return True

  elif args.command == 'read':
    if not args.filepath:
      logging.error("Need file to read onion addresses from")
    f = tc.read_sitelist(args.filepath, (not args.noinsert))

  elif args.command == 'survey':
    tc.survey(args.deadonly)

  elif args.command == 'list':
    dbi.list()
Example #29
def main():
    script_dir = os.path.dirname(__file__)
    module_path = os.path.abspath(os.path.join(script_dir, '..', '..'))
    global msglogger

    # Parse arguments
    args = parser.get_parser().parse_args()
    if args.epochs is None:
        args.epochs = 90

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    msglogger = apputils.config_pylogger(os.path.join(script_dir, 'logging.conf'), args.name, args.output_dir)

    # Log various details about the execution environment.  It is sometimes useful
    # to refer to past experiment executions and this information may be useful.
    apputils.log_execution_env_state(args.compress, msglogger.logdir, gitroot=module_path)
    msglogger.debug("Distiller: %s", distiller.__version__)

    start_epoch = 0
    ending_epoch = args.epochs
    perf_scores_history = []

    if args.evaluate:
        args.deterministic = True
    if args.deterministic:
        # Experiment reproducibility is sometimes important.  Pete Warden expounded about this
        # in his blog: https://petewarden.com/2018/03/19/the-machine-learning-reproducibility-crisis/
        distiller.set_deterministic()  # Use a well-known seed, for repeatability of experiments
    else:
        # Turn on CUDNN benchmark mode for best performance. This is usually "safe" for image
        # classification models, as the input sizes don't change during the run
        # See here: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/3
        cudnn.benchmark = True

    if args.cpu or not torch.cuda.is_available():
        # Set GPU index to -1 if using CPU
        args.device = 'cpu'
        args.gpus = -1
    else:
        args.device = 'cuda'
        if args.gpus is not None:
            try:
                args.gpus = [int(s) for s in args.gpus.split(',')]
            except ValueError:
                raise ValueError('ERROR: Argument --gpus must be a comma-separated list of integers only')
            available_gpus = torch.cuda.device_count()
            for dev_id in args.gpus:
                if dev_id >= available_gpus:
                    raise ValueError('ERROR: GPU device ID {0} requested, but only {1} devices available'
                                     .format(dev_id, available_gpus))
            # Set default device in case the first one on the list != 0
            torch.cuda.set_device(args.gpus[0])

    # Infer the dataset from the model name
    args.dataset = 'cifar10' if 'cifar' in args.arch else 'imagenet'
    args.num_classes = 10 if args.dataset == 'cifar10' else 1000

    if args.earlyexit_thresholds:
        args.num_exits = len(args.earlyexit_thresholds) + 1
        args.loss_exits = [0] * args.num_exits
        args.losses_exits = []
        args.exiterrors = []

    # Create the model
    model = create_model(args.pretrained, args.dataset, args.arch,
                         parallel=not args.load_serialized, device_ids=args.gpus)
    compression_scheduler = None
    # Create a couple of logging backends.  TensorBoardLogger writes log files in a format
    # that can be read by Google's Tensor Board.  PythonLogger writes to the Python logger.
    tflogger = TensorBoardLogger(msglogger.logdir)
    pylogger = PythonLogger(msglogger)

    # capture thresholds for early-exit training
    if args.earlyexit_thresholds:
        msglogger.info('=> using early-exit threshold values of %s', args.earlyexit_thresholds)

    # TODO(barrh): args.deprecated_resume is deprecated since v0.3.1
    if args.deprecated_resume:
        msglogger.warning('The "--resume" flag is deprecated. Please use "--resume-from=YOUR_PATH" instead.')
        if not args.reset_optimizer:
            msglogger.warning('If you wish to also reset the optimizer, call with: --reset-optimizer')
            args.reset_optimizer = True
        args.resumed_checkpoint_path = args.deprecated_resume

    # We can optionally resume from a checkpoint
    optimizer = None
    if args.resumed_checkpoint_path:
        model, compression_scheduler, optimizer, start_epoch = apputils.load_checkpoint(
            model, args.resumed_checkpoint_path, model_device=args.device)
    elif args.load_model_path:
        model = apputils.load_lean_checkpoint(model, args.load_model_path,
                                              model_device=args.device)
    if args.reset_optimizer:
        start_epoch = 0
        if optimizer is not None:
            optimizer = None
            msglogger.info('\nreset_optimizer flag set: Overriding resumed optimizer and resetting epoch count to 0')

    # Define loss function (criterion)
    criterion = nn.CrossEntropyLoss().to(args.device)

    if optimizer is None:
        optimizer = torch.optim.SGD(model.parameters(),
            lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        msglogger.info('Optimizer Type: %s', type(optimizer))
        msglogger.info('Optimizer Args: %s', optimizer.defaults)

    if args.AMC:
        return automated_deep_compression(model, criterion, optimizer, pylogger, args)
    if args.greedy:
        return greedy(model, criterion, optimizer, pylogger, args)

    # This sample application can be invoked to produce various summary reports.
    if args.summary:
        return summarize_model(model, args.dataset, which_summary=args.summary)

    activations_collectors = create_activation_stats_collectors(model, *args.activation_stats)

    if args.qe_calibration:
        msglogger.info('Quantization calibration stats collection enabled:')
        msglogger.info('\tStats will be collected for {:.1%} of test dataset'.format(args.qe_calibration))
        msglogger.info('\tSetting constant seeds and converting model to serialized execution')
        distiller.set_deterministic()
        model = distiller.make_non_parallel_copy(model)
        activations_collectors.update(create_quantization_stats_collector(model))
        args.evaluate = True
        args.effective_test_size = args.qe_calibration

    # Load the datasets: the dataset to load is inferred from the model name passed
    # in args.arch.  The default dataset is ImageNet, but if args.arch contains the
    # substring "_cifar", then cifar10 is used.
    train_loader, val_loader, test_loader, _ = apputils.load_data(
        args.dataset, os.path.expanduser(args.data), args.batch_size,
        args.workers, args.validation_split, args.deterministic,
        args.effective_train_size, args.effective_valid_size, args.effective_test_size)
    msglogger.info('Dataset sizes:\n\ttraining=%d\n\tvalidation=%d\n\ttest=%d',
                   len(train_loader.sampler), len(val_loader.sampler), len(test_loader.sampler))

    if args.sensitivity is not None:
        sensitivities = np.arange(args.sensitivity_range[0], args.sensitivity_range[1], args.sensitivity_range[2])
        return sensitivity_analysis(model, criterion, test_loader, pylogger, args, sensitivities)

    if args.evaluate:
        return evaluate_model(model, criterion, test_loader, pylogger, activations_collectors, args,
                              compression_scheduler)

    if args.compress:
        # The main use-case for this sample application is CNN compression. Compression
        # requires a compression schedule configuration file in YAML.
        compression_scheduler = distiller.file_config(model, optimizer, args.compress, compression_scheduler,
            (start_epoch-1) if args.resumed_checkpoint_path else None)
        # Model is re-transferred to GPU in case parameters were added (e.g. PACTQuantizer)
        model.to(args.device)
    elif compression_scheduler is None:
        compression_scheduler = distiller.CompressionScheduler(model)

    if args.thinnify:
        #zeros_mask_dict = distiller.create_model_masks_dict(model)
        assert args.resumed_checkpoint_path is not None, \
            "You must use --resume-from to provide a checkpoint file to thinnify"
        distiller.remove_filters(model, compression_scheduler.zeros_mask_dict, args.arch, args.dataset, optimizer=None)
        apputils.save_checkpoint(0, args.arch, model, optimizer=None, scheduler=compression_scheduler,
                                 name="{}_thinned".format(args.resumed_checkpoint_path.replace(".pth.tar", "")),
                                 dir=msglogger.logdir)
        print("Note: your model may have collapsed to random inference, so you may want to fine-tune")
        return

    args.kd_policy = None
    if args.kd_teacher:
        teacher = create_model(args.kd_pretrained, args.dataset, args.kd_teacher, device_ids=args.gpus)
        if args.kd_resume:
            teacher = apputils.load_lean_checkpoint(teacher, args.kd_resume)
        dlw = distiller.DistillationLossWeights(args.kd_distill_wt, args.kd_student_wt, args.kd_teacher_wt)
        args.kd_policy = distiller.KnowledgeDistillationPolicy(model, teacher, args.kd_temp, dlw)
        compression_scheduler.add_policy(args.kd_policy, starting_epoch=args.kd_start_epoch, ending_epoch=args.epochs,
                                         frequency=1)

        msglogger.info('\nStudent-Teacher knowledge distillation enabled:')
        msglogger.info('\tTeacher Model: %s', args.kd_teacher)
        msglogger.info('\tTemperature: %s', args.kd_temp)
        msglogger.info('\tLoss Weights (distillation | student | teacher): %s',
                       ' | '.join(['{:.2f}'.format(val) for val in dlw]))
        msglogger.info('\tStarting from Epoch: %s', args.kd_start_epoch)

    if start_epoch >= ending_epoch:
        msglogger.error(
            'epoch count is too low, starting epoch is {} but total epochs set to {}'.format(
            start_epoch, ending_epoch))
        raise ValueError('Epochs parameter is too low. Nothing to do.')
    for epoch in range(start_epoch, ending_epoch):
        # This is the main training loop.
        msglogger.info('\n')
        if compression_scheduler:
            compression_scheduler.on_epoch_begin(epoch,
                metrics=(vloss if (epoch != start_epoch) else 10**6))

        # Train for one epoch
        with collectors_context(activations_collectors["train"]) as collectors:
            train(train_loader, model, criterion, optimizer, epoch, compression_scheduler,
                  loggers=[tflogger, pylogger], args=args)
            distiller.log_weights_sparsity(model, epoch, loggers=[tflogger, pylogger])
            distiller.log_activation_statsitics(epoch, "train", loggers=[tflogger],
                                                collector=collectors["sparsity"])
            if args.masks_sparsity:
                msglogger.info(distiller.masks_sparsity_tbl_summary(model, compression_scheduler))

        # evaluate on validation set
        with collectors_context(activations_collectors["valid"]) as collectors:
            top1, top5, vloss = validate(val_loader, model, criterion, [pylogger], args, epoch)
            distiller.log_activation_statsitics(epoch, "valid", loggers=[tflogger],
                                                collector=collectors["sparsity"])
            save_collectors_data(collectors, msglogger.logdir)

        stats = ('Performance/Validation/',
                 OrderedDict([('Loss', vloss),
                              ('Top1', top1),
                              ('Top5', top5)]))
        distiller.log_training_progress(stats, None, epoch, steps_completed=0, total_steps=1, log_freq=1,
                                        loggers=[tflogger])

        if compression_scheduler:
            compression_scheduler.on_epoch_end(epoch, optimizer)

        # Update the list of top scores achieved so far, and save the checkpoint
        update_training_scores_history(perf_scores_history, model, top1, top5, epoch, args.num_best_scores)
        is_best = epoch == perf_scores_history[0].epoch
        checkpoint_extras = {'current_top1': top1,
                             'best_top1': perf_scores_history[0].top1,
                             'best_epoch': perf_scores_history[0].epoch}
        apputils.save_checkpoint(epoch, args.arch, model, optimizer=optimizer, scheduler=compression_scheduler,
                                 extras=checkpoint_extras, is_best=is_best, name=args.name, dir=msglogger.logdir)

    # Finally run results on the test set
    test(test_loader, model, criterion, [pylogger], activations_collectors, args=args)
Example #30
def main(argv=[]):
  args = parser.get_parser()

  verbose = 1
  if args.verbose:
    verbose = 2
  if args.debug:
    verbose = 3

  if verbose>2:
    log_level=logging.DEBUG
  elif verbose==2:
    log_level=logging.INFO
  elif verbose==1:
    log_level=logging.WARNING
  elif verbose<1:
    log_level=logging.ERROR

  logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s: %(message)s")

  if args.command == 'version':
    print cexbot.get_version()
    return True

  # make sure this is always above command parsing
  # print config
  config.first_run()

  if verbose == 3:
    print args

  if args.command == 'config':
    if args.list:
      return config.list()
    elif args.edit:
      return config.edit_config()
    elif args.testauth:
      return config.test_auth()
    elif args.name and args.value:
      v = config.set(args.name, args.value)
      return config.cprint(args.name)
    elif args.name:
      return config.cprint(args.name)
    logging.error('Invalid config option')
    return 1

  elif args.command == 'update':
    return updater.check_update()

  # not implemented
  elif args.command == 'cleardata':
    return config.clear_userdata()


  ac = cexapi.CexAPI(config.get('cex.username'), config.get('cex.apikey'), config.get('cex.secret'))
  dbi = db.DbManager()
  cx = CexMethods(ac, dbi)

  if args.command == 'balance':
    print "Balance: %s BTC" % ac.get_balance()
    return True

  elif args.command == 'initdb':
    return dbi.initdb()

  elif args.command == 'getmarket':
    return ac.get_market()

  elif args.command == 'getprice':
    return ac.get_market_quote()

  elif args.command == 'order':
    amount = args.amount
    price = args.price
    r = ac.place_order(amount, price)
    logging.info("Ordered: %s" % r)

  elif args.command == 'updatequotes':
    logging.info('Running updatequotes')
    ticker_timer = timer.ReqTimer(2, cx.update_ticker)
    ticker_timer.start()

  elif args.command == 'buybalance':
    logging.info('Running buybalance')
    balance_timer = timer.ReqTimer(5, ac.buy_balance)
    balance_timer.start()
Example #31
import numpy as np
from tqdm import tqdm

from parser import get_parser
from environments import EnvHub
from agents import AgentHub
from lib import plotting

if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()

    env = EnvHub.get_env(env_id=args.env_id, save=args.save)
    get_agent = AgentHub[args.agent_id]

    agent = get_agent(
        action_space=env.action_space,
        discount_factor=args.discount_factor,
    )

    stats = plotting.EpisodeStats(
        episode_lengths=np.zeros(args.num_episodes),
        episode_rewards=np.zeros(args.num_episodes),
    )

    p_bar = tqdm(range(args.num_episodes))
    for i_episode in p_bar:
        state = env.reset()
        done = False
        t = 0
        while not done:
Example #32
import time

import numpy as np

import torch
torch.multiprocessing.set_start_method('spawn', force=True)
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
from torch.multiprocessing import Process

import parser
import dataloader
import hlsgd_logging

args = parser.get_parser().parse_args()
"""
##########################################################################################
#
#   Get Arguments From Parser.
#
##########################################################################################
"""

debug_mode_enabled = args.debug
world_size = args.world_size
batch_size = args.batch_size
lr = args.learning_rate
epoch_size = args.epoch_size
gpu = args.gpu
training_model = args.model
Example #33
        print "Server name: ", server.name
        print "Server id: ", server.id
        print "Networks: ", server.networks

    def delete(self):
        """
        Deletes the server if the file already exists.

        """
        server = self.get_server()
        if server:
            server.delete()
            print "Deleting server %s" % server.name
            self.wait_for_status('deleted', fail_on_not_exist=False)
            os.remove(self._config['id_file'])
        else:
            print "Nothing to delete."

if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    conf = get_config(args)

    d = Devstork(conf)
    # Run the subcommand - either create or delete
    if args.action == 'create':
        d.create(wait_for_userdata=args.wait_for_userdata)
    elif args.action == 'delete':
        d.delete()
Example #34
                logstr += 'Epoch {}, used time: {}, {}'.format(
                    n_epoch,
                    time.time() - epoch_tick, result)
                logger.log(logstr)
            manager.stop()

        except Exception as e:
            print(e)
            if not manager.stop():
                manager.stop()
            raise
    return sess


if __name__ == '__main__':
    args = get_parser(sys.argv[1:])
    tick = time.time()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.device

    #load all necessary data
    local_data_path = 'data/{}_byuser.pk'.format(args.CITY)
    train_set, test_set = load_local_dataset(local_data_path)
    data_path = os.path.join(
        os.path.join(
            args.ROOT, 'data',
            '{}_INTV_processed_voc5_len2_setting_WITH_GPS_WITH_TIME_WITH_USERID.pk'
            .format(args.CITY)))
    _, dicts = load_data(data_path)
    args = complete_args(args, dicts)
    del _
Example #35
            elements = process_elements(grammar, rulename, elements)
            grammar.add_rule(rulename, *elements)
        while tail:
            item, tail = tail
            if item is None:
                continue
            rulename, alternation = item
            for elements in alternation:
                elements = process_elements(grammar, rulename, elements)
                grammar.add_rule(rulename, *elements)
        return grammar



# (1) Get the tokenizer (2) Compile the grammar (3) Build the parser
abnf_grammar.get_tokenizer()
abnf_grammar.compile_grammar(Context)
abnf_parser = get_parser(abnf_grammar, 'rulelist')


###########################################################################
# The Public API
###########################################################################
def build_grammar(data, context_class=None):
    context = Context(data)
    grammar = abnf_parser.run(data, context)
    grammar.get_tokenizer()
    grammar.compile_grammar(context_class)
    return grammar
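
A short usage sketch for the public API above; the ABNF source and start rule are illustrative, and the calling convention is inferred from the abnf_parser lines earlier in this module.

# Hypothetical usage: compile a small ruleset, then build a parser for it.
rules = 'greeting = "hello" SP name\r\nname = 1*ALPHA\r\n'
greeting_grammar = build_grammar(rules)
greeting_parser = get_parser(greeting_grammar, 'greeting')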