Code example #1
def main(BASE_DIR):
    # Parse planner args
    args = arguments.parse_args()

    # Run PDDL translator (from TFD)
    prb = args.problem
    if args.domain:
        domain = args.domain
        task = translate.pddl.open(prb, domain)
    else:
        task = translate.pddl.open(prb)
        domain = utils.getDomainName(prb)

    # Compute initial horizon estimate querying a satisficing planner
    # here: ENHSP by Enrico Scala

    hplan = BASE_DIR + '/enhsp/enhsp'
    val = BASE_DIR + '/bin/validate'

    print('Start horizon computation...')

    try:
        out = subprocess.check_output([
            hplan, '-o', domain, '-f', prb, '-s', 'gbfs', '-ties', 'smaller_g',
            '-h', 'haddabs'
        ]).decode('utf-8')  # check_output returns bytes; decode for re.search below

    except subprocess.CalledProcessError as e:
        print('ENHSP failed with exit code {}'.format(e.returncode))
        sys.exit()

    # Extract plan length from output of ENHSP - actions to be done
    match = re.search(r'Plan-Length:(\d+)', out)
    if match:
        initial_horizon = int(match.group(1))
        print('Initial horizon: {}'.format(initial_horizon))

    else:
        # Computing horizon with GBFS failed for some reason
        print('Could not determine initial horizon with GBFS...')

        # Print output of ENHSP for diagnosis and exit
        print(out)
        sys.exit()

    # Compose encoder and search according to user flags
    e = encoder.Encoder(task, modifier.LinearModifier())
    s = search.LinearSearch(e, initial_horizon)
    plan = s.do_search()

    # Validate and print plan
    try:
        if plan.validate(val, domain, prb):
            print('\nPlan found!')
            plan.do_print()
        else:
            print('Plan not valid, exiting now...')
            sys.exit()
    except Exception as e:
        print('Could not validate plan, exiting now...')
        sys.exit()
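A minimal invocation sketch for this entry point, assuming the layout implied by the paths above (enhsp/ and bin/ directories under the planner root):

import os

if __name__ == '__main__':
    # BASE_DIR is assumed to be the planner's install root containing
    # the enhsp/ and bin/ directories used inside main().
    main(os.path.dirname(os.path.abspath(__file__)))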
Code example #2
def run():
    args = arguments.parse_args()

    cfg = K.tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    K.set_session(K.tf.Session(config=cfg))

    if '2d' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_2d(args, args.iterations)
    elif '1d' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_1d(args, args.iterations)
    elif 'mlp' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_mlp(args, args.iterations)
    elif '1d_anno' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_1d_anno(args, args.iterations)
    elif '2d_anno' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_2d_anno(args, args.iterations)
    elif 'ab_spatial' == args.mode:
        ho = HyperparameterOptimizer()
        pa = ho.get_baseline_2d_params()
        pb = ho.get_baseline_2d_params()
        pb['spatial_dropout'] = False
        ho.ab_test_2d(args, pa, pb)
    else:
        raise ValueError('Error! Unknown hyperparameter optimizer mode:',
                         args.mode)
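The mode dispatch above can be made table-driven; a sketch, assuming the bayesian_search_* methods share the (args, iterations) signature shown (the ab_spatial A/B branch would stay a special case):

def run_dispatch(args):
    ho = HyperparameterOptimizer()
    searches = {
        '2d': ho.bayesian_search_2d,
        '1d': ho.bayesian_search_1d,
        'mlp': ho.bayesian_search_mlp,
        '1d_anno': ho.bayesian_search_1d_anno,
        '2d_anno': ho.bayesian_search_2d_anno,
    }
    if args.mode not in searches:
        raise ValueError('Unknown hyperparameter optimizer mode:', args.mode)
    searches[args.mode](args, args.iterations)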
Code example #3
def trainable(config, name_fmt, envname, trainingconfig, evaluate_mean_n):
    # Parse arguments
    trial_dir_str = tune.get_trial_dir()
    trial_dir = Path(trial_dir_str) if trial_dir_str is not None else Path.cwd()
    adv_force = config["adv_force"]
    name = name_fmt.format(adv_force=adv_force)
    cmd_args = [
        '--name', name, '--env', envname, '--log', '--trainingconfig',
        str(trainingconfig), '--root',
        str(trial_dir), '--monitor-dir',
        str(monitor_dir_name(envname, adv_force))
    ]
    # Add adversarial force
    cmd_args += ['--adv_force', str(adv_force)]
    args = parse_args(cmd_args)
    logging.info(f'Running {name=} with {args=}')

    def evaluate(prot, ts):
        # reward = get_mean_reward_last_n_steps(evaluate_mean_n, args.monitor_dir)
        # logging.info(f'{name} {reward=:.2f} {ts=}')
        # tune.report(reward=reward)
        robustness = eval_robustness(args, prot, envname, trainingconfig, name)
        logging.info(f'{name} {robustness=:.2f} {ts=}')
        tune.report(robustness=robustness)

    run(args, evaluate_fn=evaluate)
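A hedged sketch of how this trainable might be launched with Ray Tune; the environment name, config path, and force values are illustrative placeholders, not taken from the project:

from functools import partial
from ray import tune

analysis = tune.run(
    partial(trainable,
            name_fmt='adv_{adv_force}',      # hypothetical format string
            envname='Walker2d-v3',           # hypothetical environment
            trainingconfig='training.yaml',  # hypothetical config path
            evaluate_mean_n=100),
    config={'adv_force': tune.grid_search([0.0, 1.0, 5.0])},
)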
Code example #4
File: main.py Project: bstee615/rarl
def main():
    logging.basicConfig(level=logging.INFO)
    args = parse_args()
    result = run(args)
    if result is not None:
        avg_reward, std_reward = result
        logging.info(f'reward={avg_reward}+-{std_reward}')
Code example #5
def run():
    args = arguments.parse_args()
    if '2d' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_2d(args, args.iterations)
    elif '1d' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_1d(args, args.iterations)
    elif 'mlp' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_mlp(args, args.iterations)
    elif '1d_anno' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_1d_anno(args, args.iterations)
    elif '2d_anno' == args.mode:
        ho = HyperparameterOptimizer()
        ho.bayesian_search_2d_anno(args, args.iterations)
    elif 'ab_spatial' == args.mode:
        ho = HyperparameterOptimizer()
        pa = ho.get_baseline_2d_params()
        pb = ho.get_baseline_2d_params()
        pb['spatial_dropout'] = False
        ho.ab_test_2d(args, pa, pb)
    else:
        raise ValueError('Error! Unknown hyperparameter optimizer mode:',
                         args.mode)
Code example #6
def saveScatter(Xs, Ys, args=None):
    cnt = len(Xs)
    # fig, axs = plt.subplots(1, cnt, figsize=(10, 10))
    #
    # if cnt == 1:
    #     f1_data = np.concatenate((Xs[0], Ys[0]), axis=2)
    #     f1_data = f1_data.reshape(f1_data.shape[0], f1_data.shape[1]*f1_data.shape[2])
    #     f1_data = np.transpose(f1_data, (1, 0))
    #     axs.scatter(f1_data[0], f1_data[1])
    # else:
    #     for i, (x, y) in enumerate(zip(Xs, Ys)):
    #         f1_data = np.concatenate((x, y), axis=2)
    #         f1_data = f1_data.reshape(f1_data.shape[0], f1_data.shape[1]*f1_data.shape[2])
    #         f1_data = np.transpose(f1_data, (1, 0))
    #
    #         axs[i].scatter(f1_data[0], f1_data[1])
    colors = ['green', 'blue', 'yellow', 'red', 'black']
    for i, (x, y) in enumerate(zip(Xs, Ys)):
        # wrap around so more than len(colors) series do not raise IndexError
        plt.scatter(x, y, color=colors[i % len(colors)])
    if args is None:
        args = arguments.parse_args()
    fig_path = fh.getStoragePath() + 'Figures/'
    fh.makeDirectories(fig_path)
    plt.savefig(fig_path + 'Layer_%d_Hidden_%d_Epoch_%d.png' %
                (args.n_layers, args.n_hidden, args.num_epochs))
    plt.clf()
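A minimal usage sketch; the argument object is assumed to expose the n_layers, n_hidden, and num_epochs fields used in the output filename:

import numpy as np
from types import SimpleNamespace

xs = [np.random.rand(100), np.random.rand(100)]
ys = [np.random.rand(100), np.random.rand(100)]
args = SimpleNamespace(n_layers=2, n_hidden=64, num_epochs=10)
saveScatter(xs, ys, args=args)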
Code example #7
def main():
    args = parse_args()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    # multi-gpu training (should be after apex fp16 initialization);
    # note: the DataParallel wrapping must happen after each per-task model
    # is built in the loop below -- no `model` exists at this point, so a
    # bare `model = torch.nn.DataParallel(model)` here would raise a NameError.

    # Prepare GLUE tasks
    processors = {}
    output_modes = {}
    label_lists = {}
    num_label_list = {}
    # args.task_params = open(args.task_params)
    for key in args.task_params:
        print(processors)
        processors[key] = glue_processors[key]()
        output_modes[key] = glue_output_modes[key]
        label_lists[key] = processors[key].get_labels()
        num_label_list[key] = len(label_lists[key])

    # Configs
    configs = {}
    for key in args.task_params:
        configs[key] = AutoConfig.from_pretrained(args.model_type,
                                                  # args.config_name if args.config_name else args.model_name_or_path,
                                                  num_labels=num_label_list[key],
                                                  finetuning_task=key,
                                                  cache_dir=None,
                                                  )

    # Tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        args.model_type,
        do_lower_case=args.do_lower_case,
        cache_dir=None,
    )

    # Continual Learning
    # no of task configurations specified in example_task.json
    n = len(configs)
    # final Accuracy of all tasks
    accuracy_matrix = np.zeros((n, n))

    # TODO
    transfer_matrix = np.zeros((n, n))

    # List of all tasks defined in example_task.json
    tasks = list(args.task_params.keys())
    models = []
    # Model
    # Load specific model for each task and added it to the models  list
    for key in args.task_params:
Code example #8
def main():
    """Program entrypoint"""
    try:
        args = parse_args()
        args.command(args)
    except Exception as error:
        print(error, file=sys.stderr)
        sys.exit(1)
Code example #9
def run():
	args = arguments.parse_args()
	args.labels = defines.calling_labels
	if args.mode == 'tensors':
		infer_tensor(args)
	elif args.mode == 'vcf':
		infer_vcf(args)
	else:
		raise ValueError('Unknown calling mode:', args.mode)
Code example #10
def main():
	'''Main function to be used when calling getdc.py

	Keyword arguments:
	args.domain -- Domain name (required, accepts a list or a single value, default none)
	args.nameserver -- Name server (optional, accepts a hostname/IP address, default none)
	args.format -- Output format (required, accepted values [json, host, ip, hostip, zerologon])
	args.verbose -- Toggle debug messages to stdout (required, accepted values boolean)
	args.exchange -- Toggle whether to look up Exchange records (required, default false)
	'''
	# Arguments from argparse
	args = arguments.parse_args()

	# python logger
	if args.verbose:
		logging.basicConfig(level=logging.DEBUG)

	# Hostname dictionary
	host_dct = {}

	# Run func query()
	for domain in args.domain:
		host_dct[domain] = {} # initiate nested dict(s) from args.domain
		answer_srv = query(domain, service='_kerberos.', protocol='_tcp.', recordtype='SRV', nameserver=args.nameserver) # call query(srv) to find dc hostname
		# Exchange was defined
		if args.exchange:
			answer_srv = query(domain, service='_autodiscover.', protocol='_tcp.', recordtype='SRV', nameserver=args.nameserver) # call query(srv) to find exchange hostname
		
		for hostname in answer_srv:
			host_dct[domain][str(hostname)] = '' # populate host_dct with hostname
			answer_a = query(hostname, service='', protocol='', recordtype='A', nameserver=args.nameserver) # call query(a) to find dc ipaddress
			ipaddress = '\n'.join(answer_a) # convert list to string
			host_dct[domain][str(hostname)] = ipaddress # populate host_dct with ipaddress
		
	# format type output json
	if args.format == 'json': # stdout json
		json_data = json.dumps(host_dct, indent=4, sort_keys=True) # convert host_dct to json
		print(json_data)
	# format type output
	for domain in args.domain:
		if args.format == 'host':
			for key in sorted(host_dct[domain].keys()): # stdout hostname
				print(key)
		if args.format == 'ip': 
			for value in sorted(host_dct[domain].values()): # stdout ipaddress
				print(value)
		if args.format == 'hostip':
			for key, value in sorted(host_dct[domain].items()): # stdout hostname/ipaddress
				print(key, value)
		if args.format == 'zerologon':
			for key, value in sorted(host_dct[domain].items()): # stdout netbios-name/ipaddress
				print(key.split('.')[0], value)
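The query() helper is not shown in this snippet; a hedged reconstruction with dnspython, matching the call sites above (SRV lookups prepend the service and protocol labels, A lookups return addresses):

import dns.resolver

def query(name, service='', protocol='', recordtype='A', nameserver=None):
    # Sketch only: resolve service + protocol + name and return string results.
    resolver = dns.resolver.Resolver()
    if nameserver:
        resolver.nameservers = [nameserver]
    answer = resolver.resolve(service + protocol + name, recordtype)
    if recordtype == 'SRV':
        return [str(r.target).rstrip('.') for r in answer]
    return [r.address for r in answer]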
Code example #11
File: train.py Project: jonasbrami/qmdp-net
def main(arglist):
    params = parse_args(arglist)
    print(params)

    if params.epochs == 0:
        assert len(params.loadmodel) == 1
        modelfile = params.loadmodel[0]
    else:
        modelfile = run_training(params)

    run_eval(params, modelfile)
Code example #12
def main():
    '''Main function to be used when calling getdc.py

    Keyword arguments:
    args.domain -- Domain name (required, accepts a list or a single value, default none)
    args.nameserver -- Name server (optional, accepts a hostname/IP address, default none)
    args.format -- Output format (required, accepted values [json, host, ip, hostip], default json)
    args.verbose -- Toggle debug messages to stdout (required, accepted values boolean)
    '''
    # Arguments from argparse
    args = arguments.parse_args()

    # python logger
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    # DomainController dictionary
    dc_dct = {}

    # Run main func query()
    for domain in args.domain:
        dc_dct[domain] = {}  # initiate nested dict(s) from args.domain
        answer_srv = query(
            domain, recordtype='SRV',
            nameserver=args.nameserver)  # call main func query(srv)

        for hostname in answer_srv:
            dc_dct[domain][str(hostname)] = ''  # populate dc_dct with hostname
            answer_a = query(
                hostname, recordtype='A',
                nameserver=args.nameserver)  # call the func query(a)
            ipaddress = '\n'.join(answer_a)  # convert list to string
            dc_dct[domain][str(hostname)] = ipaddress  # populate dc_dct with ipaddress

    # format type output json
    if args.format == 'json':  # stdout json
        json_data = json.dumps(dc_dct, indent=4,
                               sort_keys=True)  # convert dc_dict to json
        print(json_data)
    # format type output
    for domain in args.domain:
        if args.format == 'host':
            for key in sorted(dc_dct[domain].keys()):  # stdout hostname
                print(key)
        if args.format == 'ip':
            for value in sorted(dc_dct[domain].values()):  # stdout ipaddress
                print(value)
        if args.format == 'hostip':
            for key, value in sorted(
                    dc_dct[domain].items()):  # stdout hostname/ipaddress
                print(key, value)
Code example #13
def main():

    args = parse_args()
    set_seed(args.seed)

    image_folder = args.data_dir
    annotation_csv = args.annotation_dir
    model_dir = args.model_dir
    depth_folder = args.depth_dir

    trainloader, valloader = LoadData(depth_folder, image_folder,
                                      annotation_csv, args)

    model = UNet_Encoder_Decoder(3, args)
    model.to(args.device)
    model = model.apply(weight_init)

    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    criterion = nn.BCEWithLogitsLoss()
    mse_criterion = nn.MSELoss()
    all_criterion = (criterion, mse_criterion)  # bundled for train_epoch below
    num_epochs = args.num_train_epochs

    os.makedirs(model_dir, exist_ok=True)

    best_eval_acc = 0.0

    for epoch in tqdm(range(num_epochs)):
        data_len = len(trainloader)
        model.train()
        running_loss, model = train_epoch(model, trainloader, args,
                                          all_criterion)

        eval_roadmap_loss, eval_depth_loss, eval_acc = evaluate(
            model, valloader, args, (criterion, mse_criterion))
        print(
            '[%d, %5d] Loss: %.3f Eval Roadmap Loss: %.3f Eval Depth Loss: %.3f Eval ThreatScore: %.3f'
            % (epoch + 1, num_epochs, running_loss /
               (args.per_gpu_batch_size * data_len), eval_roadmap_loss,
               eval_depth_loss, eval_acc))

        torch.save(model.state_dict(),
                   os.path.join(model_dir, 'model_' + str(epoch) + '.pth'))
        if eval_acc > best_eval_acc:
            torch.save(
                model.state_dict(),
                os.path.join(model_dir, 'bestmodel_' + str(epoch) + '.pth'))
            best_eval_acc = eval_acc
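The weight_init callable applied to the model above is not shown; a hedged sketch of a typical initializer, with Kaiming initialization as an assumption rather than the project's confirmed scheme:

import torch.nn as nn

def weight_init(m):
    # Hypothetical reconstruction: Kaiming init for conv/linear weights,
    # zeros for biases; applied recursively via model.apply(weight_init).
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)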
Code example #14
def main():
    args = parse_args()
    _print_args(args)

    if (
            os.path.isfile(downloader.MODEL_DOWNLOADER_PATH)
            and os.path.isfile(downloader.MODEL_CONVERTER_PATH)
            and os.path.isfile(downloader.MODEL_OPTIMIZER_PATH)
    ):
        downloader.download_and_convert_models(
            args.model_list, args.output_dir, args.force, args.dl_streamer_version
        )
    else:
        print(
            "Intel(R) distribution of OpenVINO(TM) Toolkit tools not "
            "found. Please check if all dependent tools are installed and try again."
        )
        sys.exit(1)
Code example #15
def main():
    try:
        args = parse_args()
        _log_options(args)
        frame_source = None
        frame_queue = queue.Queue()
        result_queue = queue.Queue()
        msp = MediaStreamProcessor(args.grpc_server_address,
                                   args.use_shared_memory)

        _, extension = os.path.splitext(args.sample_file)
        if extension in ['.png', '.jpg']:
            frame_source = ImageSource(args.sample_file, args.loop_count)
        elif extension in ['.mp4']:
            frame_source = VideoSource(args.sample_file)
        else:
            print("{}: unsupported file type".format(args.sample_file))
            sys.exit(1)

        width, height = frame_source.dimensions()
        print("{} {}".format(width, height))
        msp.start(width, height, frame_queue, result_queue)

        with open(args.output_file, "w") as output:
            image = frame_source.get_frame()
            while image:
                frame_queue.put(image)
                while not result_queue.empty():
                    result = result_queue.get()
                    print_result(result, output)
                image = frame_source.get_frame()
            frame_queue.put(None)

            result = result_queue.get()
            while result:
                print_result(result, output)
                result = result_queue.get()

        frame_source.close()

    except Exception:
        log_exception()
        sys.exit(-1)
Code example #16
    # print_rank_0(f'AutoMP: embedding_output = {embedding_output}')

    def gpt2_attention_mask_func(attention_scores, ltor_mask):
        attention_scores.masked_fill_(ltor_mask, -10000.0)
        return attention_scores

    transformer = ParallelTransformer(
        attention_mask_func=gpt2_attention_mask_func,
        num_layers=args.num_layers,
        hidden_size=args.hidden_size,
        layernorm_epsilon=args.layernorm_epsilon,
        num_attention_heads=args.num_attention_heads,
        attention_dropout=0.1,
        hidden_dropout=0.1)

    attention_mask, loss_mask, position_ids = get_ltor_masks_and_position_ids(
        input_indices, vocab_size - 1)

    transformer_output = transformer.forward(hidden_states=embedding_output,
                                             attention_mask=attention_mask)
    print_rank_0(f'AutoMP: transformer_output = {transformer_output}')


if __name__ == '__main__':
    # Parse command line arguments
    parse_args()

    args = get_args()

    train()
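The ltor (left-to-right) mask consumed by gpt2_attention_mask_func marks future positions; a minimal self-contained sketch of the masking step, with illustrative shapes and the assumed convention that True means "hide":

import torch

seq_len = 4
# Upper-triangular boolean mask: True above the diagonal = future tokens.
ltor_mask = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool), diagonal=1)
scores = torch.zeros(seq_len, seq_len)
scores.masked_fill_(ltor_mask, -10000.0)  # large negative => ~zero attention weight
print(scores)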
Code example #17
def main():
    args = parse_args()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    # Prepare GLUE tasks
    processors = {}
    output_modes = {}
    label_lists = {}
    num_label_list = {}
    for key in args.task_params:
        print(processors)
        processors[key] = glue_processors[key]()
        output_modes[key] = glue_output_modes[key]
        label_lists[key] = processors[key].get_labels()
        num_label_list[key] = len(label_lists[key])

    # Configs
    configs = {}
    for key in args.task_params:
        configs[key] = AutoConfig.from_pretrained(args.model_type,
                                                  # args.config_name if args.config_name else args.model_name_or_path,
                                                  num_labels=num_label_list[key],
                                                  finetuning_task=key,
                                                  cache_dir=None,
                                                  )

    # Tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        args.model_type,
        do_lower_case=args.do_lower_case,
        cache_dir=None,
    )

    # Continual Learning
    n = len(configs)
    accuracy_matrix = np.zeros((n, n))
    transfer_matrix = np.zeros((n, n))

    tasks = list(args.task_params.keys())
    models = []
    # Model
    for key in args.task_params:
        if args.n_gpu <= 1:
            # if key == 'mrpc':
            #     models.append((key,
            #                    torch.load('mrpc_bert_2_5e5.ckpt')))
            #                    # shared_model(AutoModelForSequenceClassification.from_pretrained(args.model_type))))
            #     accuracy_matrix = np.array([[0.86 , 0.5], [0, 0]])
            # else:
            models.append((key,
                           shared_model(AutoModelForSequenceClassification.from_pretrained(args.model_type))))
        else:
            models.append((key,
                           shared_model(torch.nn.DataParallel(
                               AutoModelForSequenceClassification.from_pretrained(args.model_type)))))

    for i in range(n):
        models[i][1].to(args.device)
        save_model(args, i, models[i][1])

    for i in range(len(configs)):
        # if i == 0 and models[i][0] == 'mrpc':
        #     continue
        # else:
        if i > 0:
            part_name = partial_name(args)
            # Always load the BERT parameters of previous model
            models[i][1].tmodel.load_state_dict(
                torch.load(os.path.join(args.output_dir, part_name, "bert_paramters_" + str(i - 1) + ".pt")), strict=False)
        models[i][1].reg_params = models[i - 1][1].reg_params
        new_args = convert_dict(args.task_params[tasks[i]], args)
        train_dataset = load_and_cache_examples(args, tasks[i], tokenizer, evaluate=False)
        global_step, tr_loss, accuracy_matrix, new_model = train(new_args, train_dataset, tasks[i], tasks, models[i][1],
                                                                 i, tokenizer, accuracy_matrix)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    print()
    print("***** Accuracy Matrix *****")
    print('regularization lambda = {}'.format(args.reg_lambda))
    print()

    print(accuracy_matrix)

    print()
    print("***** Transfer Matrix *****")
    # print("Future Transfer => Upper Triangular Matrix  ||  Backward Transfer => Lower Triangular Matrix")
    print()

    for i in range(n):
        for j in range(n):
            transfer_matrix[j][i] = accuracy_matrix[j][i] - accuracy_matrix[i][i]

    print(transfer_matrix)
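The final double loop subtracts the matrix diagonal column-wise; an equivalent vectorized sketch:

import numpy as np

# transfer[j, i] = accuracy[j, i] - accuracy[i, i]; broadcasting the
# length-n diagonal over rows gives the same matrix in one expression.
transfer_matrix = accuracy_matrix - np.diag(accuracy_matrix)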
Code example #18
def main():
    params = parse_args(default_files=('./gibson_submission.conf', ))
    is_submission = (params.gibson_mode == 'submission')

    if is_submission:
        from gibson2.envs.challenge import Challenge

        challenge = Challenge()
        agent = MappingAgent(params)
        challenge.submit(agent)

        print("Done with submission.")
        raise SystemExit

    if params.gibson_track != '':
        os.environ['SIM2REAL_TRACK'] = params.gibson_track
    track = os.environ['SIM2REAL_TRACK']

    # challenge.submit(agent)
    models = sorted(os.listdir(gibson2.dataset_path))
    skip = []
    if params.gibson_split == "custom":
        # # models = models[:5]
        # models = models[14:15]
        # # models = models[-10:]
        # num_episodes_per_floor = 1
        # num_repeats = 1
        models = models[:-10]
        num_episodes_per_floor = 5
        num_repeats = 8
        skip = range(708)
    elif params.gibson_split == "train":
        # models = models[67:-10] + models[:66]
        # models = models[:66] + models[67:-10]
        models = models[:-10]
        num_episodes_per_floor = 5
        num_repeats = 4
    elif params.gibson_split == "test":
        models = models[-10:]
        num_episodes_per_floor = 5
        num_repeats = 4
        # models = models[-3:]
    elif params.gibson_split == "evaltest":
        models = models[-10:]
        num_episodes_per_floor = 10
        num_repeats = 1
        # models = models[-3:]
    elif params.gibson_split == "minitest":
        models = models[-10:]
        num_episodes_per_floor = 5
        num_repeats = 1
        # models = models[-3:]
    else:
        raise ValueError("Unknown split %s" % params.gibson_split)


    if params.gibson_mode == 'gen_maps':
        num_repeats = 1

        print(models)
        assert len(models) < 1000

        print("Generating maps for %d models." % len(models))
        for model_i, model_name in enumerate(models):
            # p = Process(target=save_episodes_helper, args=(args, model_name, output_filename + '.{:03d}'.format(i), num_episodes_per_floor=num_episodes_per_floor))
            # p.start()
            # p.join()
            # p.terminate()
            if model_i in skip:
                continue
            run_in_separate_process(gen_map_helper, params, model_name)

    elif params.gibson_mode in ['gen_scenarios']:
        models = models * num_repeats
        print(models)
        assert len(models) < 1000

        output_filename = './data/scenarios_{}_{}_{}.tfrecords'.format(
            params.gibson_split, track,
            time.strftime('%m-%d-%H-%M-%S', time.localtime()))

        for model_i, model_name in enumerate(models):
            if model_i in skip:
                continue
            run_in_separate_process(
                save_episodes_helper,
                model_name,
                output_filename + '.{:03d}'.format(model_i),
                num_episodes_per_floor=num_episodes_per_floor)

    elif params.gibson_mode in ['eval']:
        models = models * num_repeats
        print(models)
        assert len(models) < 1000

        evaluate_episodes(params, models, num_episodes_per_floor)

    elif params.gibson_mode in ['evalsubmission']:
        models = models * num_repeats
        print(models)
        assert len(models) < 1000
        from gibson2.envs.challenge import Challenge

        for model_i, model_name in enumerate(models):
            challenge = Challenge()
            agent = MappingAgent(params)
            challenge.submit(agent)

    else:

        raise ValueError("Unknown gibson_mode=%s" % params.gibson_mode)
Code example #19
def main():
    import arguments
    arguments.parse_args()
Code example #20
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    # parser.add_argument('n_gpu', default=1)
    """
        if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")                                              args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()                                                                       else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs                                                torch.cuda.set_device(args.local_rank)                                                                                                  device = torch.device("cuda", args.local_rank)                                                                                          torch.distributed.init_process_group(backend="nccl")                                                                                    args.n_gpu = 1                                                                                                                      args.device = device
    """
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        # try:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
        # except ValueError as e:
        #    print('Additional arguments passed not parsed by ArgumentParser: {}'.format(e))

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    args = arguments.parse_args()
    # Set seed
    set_seed(training_args.seed if args.seed is None else args.seed)

    try:
        num_labels = glue_tasks_num_labels[data_args.task_name] if random_labels == 0 else random_labels
        output_mode = glue_output_modes[data_args.task_name]
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = SANConfig(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        hidden_act="gelu",
        intermediate_size=args.hidden_dim,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    config.max_position_embeddings = -1

    config.train_loss_l = []
    # Can enable test_width to observe effects of widths.
    test_width = False
    if test_width:
        depth = False  # flip to True to test depth instead of breadth

        if depth:
            print("DEPTH")
            config.num_hidden_layers = 3  # 8 #8
            config.num_attention_heads = 3  # 8
            dim_per_head = 51
        else:
            print("BREADTH")
            config.num_hidden_layers = 6  # 5
            config.num_attention_heads = 1  # 1 #10 6
            dim_per_head = 64
        print(config.num_hidden_layers, config.num_attention_heads)
        config.hidden_size = config.num_attention_heads * dim_per_head
        config.intermediate_size = 2 * config.hidden_size

    if do_paths:
        config.num_hidden_layers = args.depth  # 1
        config.num_attention_heads = args.width  # 1
        config.hidden_dropout_prob = 0.1
        config.hidden_size = args.hidden_dim
        config.intermediate_size = config.hidden_size  # *2
        # hidden_size is total hidden dimension, to be divided across heads, num_heads is number of heads

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    if do_paths or test_width:
        # model = AutoModelForSequenceClassification.from_config(config=config)
        model = SANForSequenceClassification(config=config)
    else:
        model = AutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    if do_paths or test_width:
        # count params
        total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        for name, p in model.named_parameters():
            print("p {} size {}".format(name, p.numel()))
        print("+++TOTAL model params {}+++".format(total_params))

    model_path = "modelNLP{}d{}_{}_{}.pt".format(
        config.num_attention_heads, config.num_hidden_layers, config.hidden_size, int(training_args.num_train_epochs)
    )  # modelNLP4d6_256_10.pt
    if os.path.exists(model_path):
        state_dict = torch.load(model_path, map_location="cpu")
        model.load_state_dict(state_dict)
        model = model.to(training_args.device)
        training_args.num_train_epochs = 0

    # Get datasets
    train_dataset = (
        GlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        GlueDataset(data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    test_dataset = (
        GlueDataset(data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir)
        if training_args.do_predict
        else None
    )

    def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:
        def compute_metrics_fn(p: EvalPrediction):
            if output_mode == "classification":
                preds = np.argmax(p.predictions, axis=1)
            elif output_mode == "regression":
                preds = np.squeeze(p.predictions)
            return glue_compute_metrics(task_name, preds, p.label_ids) if random_labels == 0 else 0

        return compute_metrics_fn

    if random_labels:
        print("Note: random labels assigned to data!")
        target_feat_len = min(len(train_dataset.features), args.n_train_data)
        train_dataset.features = train_dataset.features[:target_feat_len]

        # traindataset consists of InputFeatures objects, each having label, attention mask, input_ids, etc.
        # torch.manual_seed(44)
        rand = torch.randint(random_labels, (len(train_dataset),))
        n_tok = len(train_dataset[0].input_ids)
        # Learn and evaluate at token-level.
        test_token = True
        for i, feat in enumerate(train_dataset):
            if test_token:
                rand_labels = torch.randint(random_labels, (n_tok,))  # len(feat.input_ids)
                rand_labels[torch.Tensor(feat.attention_mask) == 0] = -1

                feat.label = rand_labels
            else:
                feat.label = rand[i]

    training_args.logging_steps = 100
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=build_compute_metrics_fn(data_args.task_name),
        args_sort=args,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    eval_results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        # Loop to handle MNLI double evaluation (matched, mis-matched)
        ##eval_datasets = [eval_dataset]
        eval_datasets = [train_dataset]
        if data_args.task_name == "mnli":
            mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
            eval_datasets.append(
                GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="dev", cache_dir=model_args.cache_dir)
            )

        # config.no_sub_path = args.no_sub_path
        # generate paths
        # convex_hull.create_path(path_len, args, all_heads=args.all_heads )
        for eval_dataset in eval_datasets:
            trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
            trainer.train(
                model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
            )

            # eval_result is consumed below, so the evaluation call must run
            eval_result = trainer.evaluate(eval_dataset=eval_dataset)

            output_eval_file = os.path.join(
                training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
            )
            if trainer.is_world_master():
                with open(output_eval_file, "w") as writer:
                    logger.info("***** Eval results {} *****".format(eval_dataset.args.task_name))
                    for key, value in eval_result.items():
                        logger.info("  %s = %s", key, value)
                        writer.write("%s = %s\n" % (key, value))

            eval_results.update(eval_result)

    if training_args.do_predict:
        logging.info("*** Test ***")
        test_datasets = [test_dataset]
        if data_args.task_name == "mnli":
            mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
            test_datasets.append(
                GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="test", cache_dir=model_args.cache_dir)
            )

        for test_dataset in test_datasets:
            predictions = trainer.predict(test_dataset=test_dataset).predictions
            if output_mode == "classification":
                predictions = np.argmax(predictions, axis=1)

            output_test_file = os.path.join(
                training_args.output_dir, f"test_results_{test_dataset.args.task_name}.txt"
            )
            if trainer.is_world_master():
                with open(output_test_file, "w") as writer:
                    logger.info("***** Test results {} *****".format(test_dataset.args.task_name))
                    writer.write("index\tprediction\n")
                    for index, item in enumerate(predictions):
                        if output_mode == "regression":
                            writer.write("%d\t%3.3f\n" % (index, item))
                        else:
                            item = test_dataset.get_labels()[item]
                            writer.write("%d\t%s\n" % (index, item))

    return eval_results
Code example #21
    for late in lates:
        print(late.value)

    return []


def dist(start, end):
    (ys, xs), (ye, xe) = start, end
    return abs(ys - ye) + abs(xs - xe)


def build_times(demand):
    def check(first, second):
        ride = dist(first[0], first[1]) + dist(first[1], second[0])
        return first[2] + ride - second[2]

    times = [[check(first, second) for second in demand] for first in demand]
    return times
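A tiny worked example for dist() and build_times(); the demand entries are assumed to be (start, end, time) triples, as implied by check():

demand = [((0, 0), (2, 3), 0), ((1, 1), (0, 0), 4)]
print(dist((0, 0), (2, 3)))  # 5: Manhattan distance
print(build_times(demand))   # times[i][j] <= 0 appears to mean ride j is reachable after ride i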


# --------------------------- Argument parsing ---------------------------------
if __name__ == '__main__':
    parsed_args = parse_args(True)
    file_name = parsed_args.pop('file_name')

    if file_name == 'all':
        for file_name in file_names:  # rebinds the module-level file_name, which main() presumably reads
            main(**parsed_args)
    else:
        main(**parsed_args)
Code example #22
def main():
    args = parse_args()

    # Set the GPU to use
    torch.cuda.set_device(args.gpu)

    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    vqa_loader = dataset.get_train_dataloader(osp.expanduser(args.annotations),
                                              osp.expanduser(args.questions),
                                              args.images,
                                              args,
                                              raw_images=args.raw_images,
                                              transforms=transform)
    # We always use the vocab from the training set
    vocab = vqa_loader.dataset.vocab

    maps = {
        "vocab": vocab,
        "word_to_wid": vqa_loader.dataset.word_to_wid,
        "wid_to_word": vqa_loader.dataset.wid_to_word,
        "ans_to_aid": vqa_loader.dataset.ans_to_aid,
        "aid_to_ans": vqa_loader.dataset.aid_to_ans,
    }
    val_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    val_loader = dataset.get_val_dataloader(osp.expanduser(
        args.val_annotations),
                                            osp.expanduser(args.val_questions),
                                            args.val_images,
                                            args,
                                            raw_images=args.raw_images,
                                            maps=maps,
                                            vocab=vocab,
                                            shuffle=False,
                                            transforms=val_transform)

    arch = Models[args.arch].value
    model = arch(len(vocab),
                 output_dim=args.top_answer_limit,
                 raw_images=args.raw_images)

    if args.resume:
        state = torch.load(args.resume)
        model.load_state_dict(state["model"])

    model.cuda()

    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           betas=tuple(args.betas),
                           weight_decay=args.weight_decay)
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=args.decay_interval,
                                    gamma=args.lr_decay)

    if args.visualize:
        vis = visualize.Visualizer(args.port)
    else:
        vis = None

    print("Beginning training")
    print("#" * 80)

    for epoch in range(args.start_epoch, args.epochs):
        trainer.train(model,
                      vqa_loader,
                      criterion,
                      optimizer,
                      epoch,
                      args,
                      vis=vis)
        trainer.evaluate(model, val_loader, criterion, epoch, args, vis=vis)
        # Step the LR scheduler after the epoch's optimizer updates
        # (the ordering PyTorch >= 1.1 expects).
        scheduler.step()

    print("Training complete!")
Code example #23
File: rl_cross_eval.py Project: aobject/gym-pcgrl
def compile_results(settings_list):
    batch_exp_name = settings_list[0]["experiment_id"]
    #   if batch_exp_name == "2":
    RL_DIR = "rl_runs"
    #   elif batch_exp_name == "1":
    #       EVO_DIR = "evo_runs_06-13"
    #       RL_DIR = "evo_runs_06-14"
    #   ignored_keys = set(
    #       (
    #           "exp_name",
    #           "evaluate",
    #           "show_vis",
    #           "visualize",
    #           "render_levels",
    #           "multi_thread",
    #           "play_level",
    #           "evaluate",
    #           "save_levels",
    #           "cascade_reward",
    #           "model",
    #           "n_generations",
    #           "render",
    #           "infer",
    #       )
    #   )
    #   keys = []

    #   for k in settings_list[0].keys():
    #       if k not in ignored_keys:
    #           keys.append(k)
    keys = [
            "problem", 
            "representation", 
            "conditionals", 
            "alp_gmm", 
            "change_percentage"
            ]
    columns = None
    data = []
    vals = []

    for i, settings in enumerate(settings_list):
        val_lst = []

        controllable = False
        for k in keys:
            v = settings[k]
            if k == 'conditionals':
                if v != ['NONE']:  # compare the value, not the key string
                    controllable = True
            if isinstance(settings[k], list):
                if len(settings[k]) < 2:
                    val_lst.append("-".join(settings[k]))
                else:
                    val_lst.append(newline(settings[k][0]+'-', v[1]))
            elif k == 'alp_gmm':
                if not controllable:
                    v = ''
                elif v:
                    v = 'learning'
                else:
                    v = 'random'
                val_lst.append(v)
            else:
                val_lst.append(v)
        args = parse_args(load_args=settings)
        arg_dict = vars(args)
        # FIXME: well this is stupid
        arg_dict["cond_metrics"] = arg_dict.pop("conditionals")
        exp_name = get_exp_name(
            arg_dict.pop("problem"), arg_dict.pop("representation"), **arg_dict
        ) + "_{}_log".format(batch_exp_name)
        # NOTE: For now, we run this locally in a special directory, to which we have copied the results of eval on
        # relevant experiments.
        exp_name = os.path.join(RL_DIR, exp_name)
        stats_f = os.path.join(exp_name, "eval", "scores_ctrlTrgs.json")
        fixTrgs_stats_f = os.path.join(exp_name, "eval", "scores_fixTrgs.json")

        if not (os.path.isfile(stats_f) and os.path.isfile(fixTrgs_stats_f)):
            print(stats_f)
            print(
                "skipping evaluation of experiment due to missing stats file(s): {}".format(
                    exp_name
                )
            )

            continue
        vals.append(tuple(val_lst))
        data.append([])
        with open(stats_f, "r") as f:
            stats = json.load(f)
        with open(fixTrgs_stats_f, "r") as f:
            fixLvl_stats = json.load(f)
        flat_stats = flatten_stats(fixLvl_stats)
        flat_stats.update(flatten_stats(stats, controllable=True))

        if columns is None:
            columns = list(flat_stats.keys())

        for j, c in enumerate(columns):
            if c not in flat_stats:
                data[-1].append("N/A")
            else:
                data[-1].append(flat_stats[c])

    tuples = vals
    # Rename headers
    new_keys = []

    for k in keys:
        if k in header_text:
            new_keys.append(header_text[k])
        else:
            new_keys.append(k)
    for (i, lst) in enumerate(tuples):
        new_lst = []
        for v in lst:
            if v in header_text:
                new_lst.append(header_text[v])
            else:
                new_lst.append(v)
        tuples[i] = new_lst

    index = pd.MultiIndex.from_tuples(tuples, names=new_keys)
    #   df = index.sort_values().to_frame(index=True)
    df = pd.DataFrame(data=data, index=index, columns=columns).sort_values(by=new_keys)
    #   print(index)

    csv_name = r"{}/cross_eval_{}.csv".format(RL_DIR, batch_exp_name)
    html_name = r"{}/cross_eval_{}.html".format(RL_DIR, batch_exp_name)
    df.to_csv(csv_name)
    df.to_html(html_name)
    print(df)

    #   tex_name = r"{}/zelda_empty-path_cell_{}.tex".format(OVERLEAF_DIR, batch_exp_name)
    # FIXME: F*****G ROUND YOURSELF DUMB FRIEND
#   df = df.round(2)
    for p in ["binary", "zelda", "sokoban"]:
        tex_name = "{}/{}_{}.tex".format(RL_DIR, p, batch_exp_name)
        df_tex = df.loc[p, "narrow"]
        p_name = p + '_ctrl'
        lcl_conds = ['None'] + ['-'.join(pi) if len(pi) < 2 else newline(pi[0]+'-',pi[1]) for pi in local_controls[p_name]]
        print(lcl_conds)
        df_tex = df_tex.loc[lcl_conds]
#       df_tex = df_tex.sort_values(by=['ALP GMM'])
        z_cols = [
            header_text["net_score (mean)"],
            header_text["diversity_score (mean)"],
            header_text["(controls) net_score (mean)"],
#           header_text["(controls) ctrl_score (mean)"],
#           header_text["(controls) fixed_score (mean)"],
            header_text["(controls) diversity_score (mean)"],
        ]
        #   df_tex = df.drop(columns=z_cols)
        df_tex = df_tex.loc[:, z_cols]
        df_tex = df_tex * 100
        df_tex = df_tex.round(0)
        dual_conds = ['None', lcl_conds[1]]
        for k in z_cols:
            if k in df_tex:
#               df_tex.loc[dual_conds][k] = df_tex.loc[dual_conds][k].apply(
#                   lambda data: bold_extreme_values(data, data_max=df_tex.loc[dual_conds][k].max())
#               )
                df_tex[k] = df_tex[k].apply(
                    lambda data: bold_extreme_values(data, data_max=df_tex[k].max())
                )
#       df_tex = df_tex.round(2)
#       df_tex.reset_index(level=0, inplace=True)
        print(df_tex)

        with open(tex_name, "w") as tex_f:
            col_widths = "p{0.5cm}p{0.5cm}p{0.5cm}p{0.5cm}p{0.5cm}p{0.5cm}p{0.8cm}p{0.8cm}p{0.8cm}"
            df_tex.to_latex(
                tex_f,
                index=True,
                columns=z_cols,
                multirow=True,
    #           column_format=col_widths,
                escape=False,
                caption=("Performance of controllable {}-generating agents with learning-progress-informed and uniform-random control regimes and baseline (single-objective) agents with various change percentage allowances.".format(p)),
                label={"tbl:{}".format(p)},
            )
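The bold_extreme_values helper applied in the loop above is not part of this snippet; a hedged reconstruction that bolds the column maximum for the LaTeX export (the integer rounding is an assumption):

def bold_extreme_values(data, data_max=None):
    # Hypothetical sketch: wrap the column maximum in \textbf, format
    # everything else as a plain rounded number.
    if data == data_max:
        return '\\textbf{{{:.0f}}}'.format(data)
    return '{:.0f}'.format(data)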
Code example #24
def main():
    args = parse_args()
    print('Called with args:')
    print(args)

    cfg = set_configs(args)
    timers = defaultdict(Timer)

    ### --------------------------------------------------------------------------------
    ### Dataset Training ###
    ### --------------------------------------------------------------------------------
    timers['roidb_training'].tic()
    roidb_training, ratio_list_training, ratio_index_training, category_to_id_map, prd_category_to_id_map = combined_roidb_for_training(
        cfg.TRAIN.DATASETS)
    timers['roidb_training'].toc()
    roidb_size_training = len(roidb_training)
    logger.info('{:d} training roidb entries'.format(roidb_size_training))
    logger.info('Takes %.2f sec(s) to construct training roidb',
                timers['roidb_training'].average_time)

    batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH

    dataset_training = RoiDataLoader(roidb_training,
                                     cfg.MODEL.NUM_CLASSES,
                                     training=True,
                                     dataset=cfg.TRAIN.DATASETS)
    dataloader_training = torch.utils.data.DataLoader(
        dataset_training,
        batch_size=batch_size,
        num_workers=cfg.DATA_LOADER.NUM_THREADS,
        collate_fn=collate_minibatch,
        shuffle=True,
        drop_last=True)
    dataiterator_training = iter(dataloader_training)

    ### --------------------------------------------------------------------------------
    ### Dataset Validation ###
    ### --------------------------------------------------------------------------------
    timers['roidb_val'].tic()
    roidb_val, ratio_list_val, ratio_index_val, _, _ = combined_roidb_for_training(
        cfg.VAL.DATASETS)
    timers['roidb_val'].toc()
    roidb_size_val = len(roidb_val)
    logger.info('{:d} val roidb entries'.format(roidb_size_val))
    logger.info('Takes %.2f sec(s) to construct val roidb',
                timers['roidb_val'].average_time)

    dataset_val = RoiDataLoader(roidb_val,
                                cfg.MODEL.NUM_CLASSES,
                                training=False,
                                dataset=cfg.VAL.DATASETS)
    dataloader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=batch_size,
        num_workers=cfg.DATA_LOADER.NUM_THREADS,
        collate_fn=collate_minibatch,
        drop_last=True)

    ### --------------------------------------------------------------------------------
    ### Dataset Test ###
    ### --------------------------------------------------------------------------------
    timers['roidb_test'].tic()
    roidb_test, ratio_list_test, ratio_index_test, _, _ = combined_roidb_for_training(
        cfg.TEST.DATASETS)
    timers['roidb_test'].toc()
    roidb_size_test = len(roidb_test)
    logger.info('{:d} test roidb entries'.format(roidb_size_test))
    logger.info('Takes %.2f sec(s) to construct test roidb',
                timers['roidb_test'].average_time)

    dataset_test = RoiDataLoader(roidb_test,
                                 cfg.MODEL.NUM_CLASSES,
                                 training=False,
                                 dataset=cfg.TEST.DATASETS)
    dataloader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=batch_size,
        num_workers=cfg.DATA_LOADER.NUM_THREADS,
        collate_fn=collate_minibatch,
        drop_last=True)

    ### --------------------------------------------------------------------------------
    ### Dataset Unseen ###
    ### --------------------------------------------------------------------------------
    if args.dataset == 'vhico':
        timers['roidb_unseen'].tic()
        roidb_unseen, ratio_list_unseen, ratio_index_unseen, _, _ = combined_roidb_for_training(
            cfg.UNSEEN.DATASETS)
        timers['roidb_unseen'].toc()
        roidb_size_unseen = len(roidb_unseen)
        logger.info('{:d} test unseen roidb entries'.format(roidb_size_unseen))
        logger.info('Takes %.2f sec(s) to construct test roidb',
                    timers['roidb_unseen'].average_time)

        dataset_unseen = RoiDataLoader(roidb_unseen,
                                       cfg.MODEL.NUM_CLASSES,
                                       training=False,
                                       dataset=cfg.UNSEEN.DATASETS)
        dataloader_unseen = torch.utils.data.DataLoader(
            dataset_unseen,
            batch_size=batch_size,
            num_workers=cfg.DATA_LOADER.NUM_THREADS,
            collate_fn=collate_minibatch,
            drop_last=True)

    ### --------------------------------------------------------------------------------
    ### Model ###
    ### --------------------------------------------------------------------------------
    maskRCNN = Generalized_RCNN(category_to_id_map=category_to_id_map,
                                prd_category_to_id_map=prd_category_to_id_map,
                                args=args)
    if cfg.CUDA:
        maskRCNN.cuda()

    ### --------------------------------------------------------------------------------
    ### Optimizer ###
    # record backbone params, i.e., conv_body and box_head params
    ### --------------------------------------------------------------------------------
    gn_params = []
    backbone_bias_params = []
    backbone_bias_param_names = []
    prd_branch_bias_params = []
    prd_branch_bias_param_names = []
    backbone_nonbias_params = []
    backbone_nonbias_param_names = []
    prd_branch_nonbias_params = []
    prd_branch_nonbias_param_names = []
    for key, value in dict(maskRCNN.named_parameters()).items():
        if value.requires_grad:
            if 'gn' in key:
                gn_params.append(value)
            elif 'Conv_Body' in key or 'Box_Head' in key or 'Box_Outs' in key or 'RPN' in key:
                if 'bias' in key:
                    backbone_bias_params.append(value)
                    backbone_bias_param_names.append(key)
                else:
                    backbone_nonbias_params.append(value)
                    backbone_nonbias_param_names.append(key)
            else:
                if 'bias' in key:
                    prd_branch_bias_params.append(value)
                    prd_branch_bias_param_names.append(key)
                else:
                    prd_branch_nonbias_params.append(value)
                    prd_branch_nonbias_param_names.append(key)
    # Learning rate of 0 is a dummy value to be set properly at the start of training
    params = [{
        'params': backbone_nonbias_params,
        'lr': 0,
        'weight_decay': cfg.SOLVER.WEIGHT_DECAY
    }, {
        'params': backbone_bias_params,
        'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
        'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0
    }, {
        'params': prd_branch_nonbias_params,
        'lr': 0,
        'weight_decay': cfg.SOLVER.WEIGHT_DECAY
    }, {
        'params': prd_branch_bias_params,
        'lr': 0 * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
        'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0
    }, {
        'params': gn_params,
        'lr': 0,
        'weight_decay': cfg.SOLVER.WEIGHT_DECAY_GN
    }]

    if cfg.SOLVER.TYPE == "SGD":
        optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
    elif cfg.SOLVER.TYPE == "Adam":
        optimizer = torch.optim.Adam(params)

    ### --------------------------------------------------------------------------------
    ### Load checkpoint
    ### --------------------------------------------------------------------------------
    if args.load_ckpt:
        load_name = args.load_ckpt
        logging.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name,
                                map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(maskRCNN, checkpoint['model'])

        print('-' * 80)
        print('loading checkpoint %s' % load_name)
        print('-' * 80)

        if args.resume:
            print('resume')
            args.start_step = checkpoint['step'] + 1
            misc_utils.load_optimizer_state_dict(optimizer,
                                                 checkpoint['optimizer'])
        del checkpoint
        torch.cuda.empty_cache()
    else:
        print('args.load_ckpt', args.load_ckpt)

    # LRs recorded for command-line logging: param group 2 holds the predicate
    # branch (non-backbone) params, group 0 the backbone params.
    lr = optimizer.param_groups[2]['lr']
    backbone_lr = optimizer.param_groups[0]['lr']

    maskRCNN = mynn.DataParallel(maskRCNN,
                                 cpu_keywords=['im_info', 'roidb'],
                                 minibatch=True)
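    # Detectron.pytorch-style DataParallel: blobs named in cpu_keywords stay on
    # the CPU side, and minibatch=True indicates inputs arrive pre-split per GPU.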

    ### --------------------------------------------------------------------------------
    ### Training Setups ###
    ### --------------------------------------------------------------------------------
    args.run_name = args.out_dir
    output_dir = misc_utils.get_output_dir(args, args.out_dir)
    args.cfg_filename = os.path.basename(args.cfg_file)

    if not args.no_save:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        blob = {'cfg': yaml.dump(cfg), 'args': args}
        with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
            pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)

        if args.use_tfboard:
            from tensorboardX import SummaryWriter
            tblogger = SummaryWriter(output_dir)

    ### --------------------------------------------------------------------------------
    ### Training Loop ###
    ### --------------------------------------------------------------------------------
    maskRCNN.train()

    # Set index for decay steps
    decay_steps_ind = None
    for i in range(1, len(cfg.SOLVER.STEPS)):
        if cfg.SOLVER.STEPS[i] >= args.start_step:
            decay_steps_ind = i
            break
    if decay_steps_ind is None:
        decay_steps_ind = len(cfg.SOLVER.STEPS)
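    # decay_steps_ind now points at the first decay step still ahead of
    # args.start_step; if every step is already behind us, it equals
    # len(cfg.SOLVER.STEPS) and no further decay fires on resume.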

    training_stats = TrainingStats(
        args, args.disp_interval,
        tblogger if args.use_tfboard and not args.no_save else None, True)

    val_stats = ValStats(
        args, args.disp_interval,
        tblogger if args.use_tfboard and not args.no_save else None, False)

    test_stats = TestStats(
        args, args.disp_interval,
        tblogger if args.use_tfboard and not args.no_save else None, False)

    best_total_loss = np.inf
    best_eval_result = 0

    ### --------------------------------------------------------------------------------
    ### EVAL ###
    ### --------------------------------------------------------------------------------
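    # When EVAL_SUBSET is set, run a single evaluation pass and return
    # without entering the training loop.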
    if cfg.EVAL_SUBSET == 'unseen':
        print('testing unseen ...')
        is_best, best_eval_result = run_eval(args,
                                             cfg,
                                             maskRCNN,
                                             dataloader_unseen,
                                             step=0,
                                             output_dir=output_dir,
                                             test_stats=test_stats,
                                             best_eval_result=best_eval_result,
                                             eval_subset=cfg.EVAL_SUBSET)
        return
    elif cfg.EVAL_SUBSET == 'test':
        print('testing ...')
        is_best, best_eval_result = run_eval(args,
                                             cfg,
                                             maskRCNN,
                                             dataloader_test,
                                             step=0,
                                             output_dir=output_dir,
                                             test_stats=test_stats,
                                             best_eval_result=best_eval_result,
                                             eval_subset=cfg.EVAL_SUBSET)
        return

    ### --------------------------------------------------------------------------------
    ### TRAIN ###
    ### --------------------------------------------------------------------------------
    try:
        logger.info('Training starts!')
        step = args.start_step
        for step in range(args.start_step, cfg.SOLVER.MAX_ITER):
            # Warm up
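            # Linear warm-up interpolates the factor from WARM_UP_FACTOR to 1
            # over WARM_UP_ITERS steps; 'constant' holds it at WARM_UP_FACTOR.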
            if step < cfg.SOLVER.WARM_UP_ITERS:
                method = cfg.SOLVER.WARM_UP_METHOD
                if method == 'constant':
                    warmup_factor = cfg.SOLVER.WARM_UP_FACTOR
                elif method == 'linear':
                    alpha = step / cfg.SOLVER.WARM_UP_ITERS
                    warmup_factor = cfg.SOLVER.WARM_UP_FACTOR * (1 - alpha) + alpha
                else:
                    raise KeyError(
                        'Unknown SOLVER.WARM_UP_METHOD: {}'.format(method))
                lr_new = cfg.SOLVER.BASE_LR * warmup_factor
                net_utils.update_learning_rate_rel(optimizer, lr, lr_new)
                lr = optimizer.param_groups[2]['lr']
                backbone_lr = optimizer.param_groups[0]['lr']
                assert lr == lr_new
            elif step == cfg.SOLVER.WARM_UP_ITERS:
                net_utils.update_learning_rate_rel(optimizer, lr,
                                                   cfg.SOLVER.BASE_LR)
                lr = optimizer.param_groups[2]['lr']
                backbone_lr = optimizer.param_groups[0]['lr']
                assert lr == cfg.SOLVER.BASE_LR

            # Learning rate decay
            if decay_steps_ind < len(cfg.SOLVER.STEPS) and \
                    step == cfg.SOLVER.STEPS[decay_steps_ind]:
                logger.info('Decay the learning rate at step %d', step)
                lr_new = lr * cfg.SOLVER.GAMMA
                net_utils.update_learning_rate_rel(optimizer, lr, lr_new)
                lr = optimizer.param_groups[2]['lr']
                backbone_lr = optimizer.param_groups[0]['lr']
                assert lr == lr_new
                decay_steps_ind += 1

            ### --------------------------------------------------------------------------------
            ### train ###
            ### --------------------------------------------------------------------------------
            training_stats.IterTic()
            optimizer.zero_grad()
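            # Accumulate gradients over args.iter_size mini-batches before one
            # optimizer step (effective batch size = batch_size * iter_size).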

            for inner_iter in range(args.iter_size):
                try:
                    input_data = next(dataiterator_training)
                except StopIteration:
                    print('Data loader exhausted; restarting iterator')
                    dataiterator_training = iter(dataloader_training)
                    input_data = next(dataiterator_training)

                for key in input_data:
                    if key != 'roidb':  # roidb is a list of ndarrays with inconsistent length
                        input_data[key] = list(map(Variable, input_data[key]))

                net_outputs = maskRCNN(**input_data)

                training_stats.UpdateIterStats(net_outputs['gt_label'],
                                               inner_iter)
                loss = net_outputs['gt_label']['total_loss']
                loss.backward()

            optimizer.step()
            training_stats.IterToc()
            training_stats.LogIterStats(step, lr, backbone_lr)

            if (step + 1) % cfg.SAVE_MODEL_ITER == 0:
                save_ckpt(output_dir, args, step, batch_size, maskRCNN,
                          optimizer, False, best_total_loss)

        # ---- Training ends ----
        save_ckpt(output_dir, args, step, batch_size, maskRCNN, optimizer,
                  False, best_total_loss)

    except (RuntimeError, KeyboardInterrupt):
        del dataiterator_training
        logger.info('Save ckpt on exception ...')
        save_ckpt(output_dir, args, step, batch_size, maskRCNN, optimizer,
                  False, best_total_loss)
        logger.info('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)

    finally:
        if args.use_tfboard and not args.no_save:
            tblogger.close()
Code example #25
File: datasets.py Project: adbugger/FewShot
        self.image_channels = 3
        self.image_size = 84
        super(fc100, self).__init__(options)


class miniImagenet(FewShotDataset):
    def __init__(self, options):
        self.dataset_root = abspath(
            getattr_or_default(options, 'dataset_root',
                               '/home/aditya.bharti/mini-imagenet'))

        self.mean = (0.4802, 0.4481, 0.3975)
        self.std = (0.2302, 0.2265, 0.2262)

        self.image_channels = 3
        self.image_size = 84
        super(miniImagenet, self).__init__(options)


if __name__ == "__main__":
    from arguments import parse_args
    opts = parse_args()

    for dataset in [fc100, miniImagenet, cifar100fs, Imagenet1k]:
        d = dataset(opts)
        print(str(d), "loaded")
        if hasattr(d, "trainval_set"):
            print(str(d), "has trainval")
        else:
            print(str(d), "couldn't find trainval")
Code example #26
    tb_writer = SummaryWriter('./log/tb_{0}'.format(args.log_name))

    # Set seed
    set_seed(args.seed, cudnn=args.make_deterministic)

    # Set sampler
    sampler = BatchSampler(args, log)

    # Set policy
    policy = CaviaMLPPolicy(
        input_size=int(np.prod(sampler.observation_space.shape)),
        output_size=int(np.prod(sampler.action_space.shape)),
        hidden_sizes=(args.hidden_size, ) * args.num_layers,
        num_context_params=args.num_context_params,
        device=args.device)
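    # CAVIA adapts only the small set of context parameters per task in the
    # inner loop; the rest of the policy network is shared across tasks.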

    # Initialise baseline
    baseline = LinearFeatureBaseline(
        int(np.prod(sampler.observation_space.shape)))

    # Initialise meta-learner
    metalearner = MetaLearner(sampler, policy, baseline, args, tb_writer)

    # Begin train
    train(sampler, metalearner, args, log, tb_writer)


if __name__ == '__main__':
    args = parse_args()
    main(args)
Code example #27
File: client.py Project: kxgames/kingdoms-of-life
#!/usr/bin/env python3

import seacow
import arguments

arguments.parser.add_argument('--host', '-x', default='kxgames.net')
arguments.parser.add_argument('--port', '-p', default=53351, type=int)
arguments.parse_args()

game = seacow.ClientLoop('Client', arguments.host, arguments.port)
game.play()
Code example #28
                eval_correct_characters += correct
            bar.update(batch_id + 1,
                       exact=[("eval score", eval_score / eval_total_instances)])

        logger.info('{} dataset accuracy: {}'.format(
            dataset_name, eval_correct_characters / eval_total_characters))


def save_model(model):
    filename = os.path.join(args.output_dir, 'model.pt')
    utils.ensure_folder(filename)
    torch.save(model, filename)


args = arguments.parse_args()
logger = init_logger()
logger.info(args)


def main():
    training_data, dev_data, test_data, char_to_ix, tag_to_ix = load_datasets()

    START_TAG = "<START>"
    STOP_TAG = "<STOP>"
    char_to_ix, [START_CHAR_ID, STOP_CHAR_ID] = complete_collection(
        char_to_ix, [START_TAG, STOP_TAG])
    tag_to_ix, [START_TAG_ID, STOP_TAG_ID] = complete_collection(
        tag_to_ix, [START_TAG, STOP_TAG])
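    # complete_collection (repo helper) presumably appends the START/STOP
    # symbols to each mapping and returns their ids, which a CRF-style tagger
    # needs for transition scoring.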
Code example #29
def main():
    args = parse_args()

    print(args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    set_seed(args.seed, args.n_gpu)

    # Prepare GLUE tasks
    processors = {}
    output_modes = {}
    label_lists = {}
    num_label_list = {}
    for key in args.task_params:
        processors[key] = glue_processors[key]()
        output_modes[key] = glue_output_modes[key]
        label_lists[key] = processors[key].get_labels()
        num_label_list[key] = len(label_lists[key])

    # Configs
    configs = {}
    for key in args.task_params:
        configs[key] = AutoConfig.from_pretrained(
            args.model_type,
            # args.config_name if args.config_name else args.model_name_or_path,
            num_labels=num_label_list[key],
            finetuning_task=key,
            cache_dir=None,
        )

    # Tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        args.model_type,
        do_lower_case=args.do_lower_case,
        cache_dir=None,
    )

    # Continual Learning
    n = len(configs)
    accuracy_matrix = np.zeros((n, n))
    transfer_matrix = np.zeros((n, n))
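    # accuracy_matrix[j][i] will hold accuracy on task i after training task j;
    # transfer_matrix is derived from it after all tasks finish.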

    tasks = list(args.task_params.keys())
    models = []
    # Model
    for key in args.task_params:
        model = AutoModelForSequenceClassification.from_pretrained(
            args.model_type, config=configs[key])
        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        if args.mas:
            model = shared_model(model)
        models.append((key, model))

    for i in range(n):
        models[i][1].to(args.device)
        save_model(args, i, models[i][1])
    if args.cuda == torch.device('cuda') and torch.cuda.device_count() > 1:
        # models is a list of (task, model) tuples, so rebuild it when wrapping.
        models = [(key, torch.nn.DataParallel(model)) for key, model in models]
    consolidate_fisher = {}
    consolidate_mean = {}

    for i in range(len(configs)):
        if (i > 0):
            part_name = partial_name(args)
            # Always load the BERT parameters of previous model
            if args.mas:
                models[i][1].tmodel.load_state_dict(torch.load(
                    os.path.join(args.output_dir, part_name,
                                 "bert_paramters_" + str(i - 1) + ".pt")),
                                                    strict=False)
                models[i][1].reg_params = models[i - 1][1].reg_params
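                # MAS: carry over the parameter-importance weights (reg_params)
                # from the previous task so the regularizer penalizes drift on
                # weights that mattered earlier.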
            else:
                models[i][1].load_state_dict(torch.load(
                    os.path.join(args.output_dir, part_name,
                                 "bert_paramters_" + str(i - 1) + ".pt")),
                                             strict=False)
        new_args = convert_dict(args.task_params[tasks[i]], args)
        train_dataset = load_and_cache_examples(args,
                                                tasks[i],
                                                tokenizer,
                                                evaluate=False)
        # --- No randomness till here ---
        global_step, tr_loss, accuracy_matrix, model = train(
            new_args, train_dataset, tasks[i], tasks, models[i][1], i,
            tokenizer, accuracy_matrix, consolidate_fisher, consolidate_mean)
        logger.info(" global_step = %s, average loss = %s", global_step,
                    tr_loss)
        if args.ewc:
            if i == len(configs) - 1:
                print("\n ***** Last Task : No need to estimate fisher matrix ***** \n")
                break
            print("\n***** Estimating Diagonals of Fisher Information Matrix *****\n")
            consolidate_fisher, consolidate_mean = estimate_fisher_mean(
                new_args, train_dataset, model, tasks[i], consolidate_fisher,
                consolidate_mean, args.per_gpu_batch_size)

    print("\n ***** Accuracy Matrix *****\n")
    print(accuracy_matrix)
    print("\n***** Transfer Matrix *****")
    print("Future Transfer => Upper Triangular Matrix  ||  Backward Transfer => Lower Triangular Matrix\n")

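    # Transfer[j][i] = accuracy on task i after training task j, minus the
    # accuracy right after training task i itself: the upper triangle measures
    # forward transfer, the lower triangle backward transfer (forgetting).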
    for i in range(n):
        for j in range(n):
            transfer_matrix[j][i] = accuracy_matrix[j][i] - accuracy_matrix[i][i]

    print(transfer_matrix)
Code example #30
import arguments
import file_handler as fh
import evaluation_handler as eh
import learn_handler as lh
from vectorizer import Vectorizer

if __name__ == '__main__':
    # Load Data
    # senti_dir = './KOSAC_sample/'
    # # {'sentences':sentences, 'tokens':tokens, 'polarity':polarity}
    # corpus = fh.getSentiCorpus(senti_dir)
    # contents = corpus['sentences']

    comments = fh.getComments()
    contents = comments['sentences']

    # Vectorizing
    # vec = Vectorizer()
    # tokenized_contents = vec.tokenizing(contents)
    # # sg=1(skip-gram), 0(CBOW)
    # model_path = 'models/word2vec_ko.model'
    # vectorized_contents = vec.vectorize(model_path, contents, dims=100)
    #
    # fh.saveVectorizedContents(vectorized_contents)
    vectorized_contents = fh.loadVectorizedContents()

    # Train ML and Evaluation
    args = arguments.parse_args('Bidirectional_LSTM')
    eh.evaluations(vectorized_contents, comments['scores'],
                   lh.Bidirectional_LSTM(args))
Code example #31
from plot import plot_acc, plot_gan_losses, plot_confusion_matrix
from arguments import parse_args
import random
import torch
import torch.backends.cudnn as cudnn
import os
import numpy as np
from dataset import FeaturesCls, FeaturesGAN
from train_cls import TrainCls
from train_gan import TrainGAN
from generate import load_unseen_att, load_all_att
from mmdetection.splits import get_unseen_class_labels

opt = parse_args()


try:
    os.makedirs(opt.outname)
except OSError:
    pass

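# If no seed was supplied, draw one at random; printing it below keeps the
# run reproducible.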
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)

for arg in vars(opt):
    print(f"######################  {arg}: {getattr(opt, arg)}")


print("Random Seed: ", opt.manualSeed)

random.seed(opt.manualSeed)