Example #1
def prepare_cifar_10_data(use_norm_shift=False, use_norm_scale=True):
    validation_data_size = 5000  # Size of the validation set.
    train_data = []
    train_label = []
    args = parse_args()
    for batch_id in range(1, 6):
        train_filename = os.path.join(args.data_dir, "data_batch_%d" % batch_id)
        train_data_batch, train_label_batch = load_CIFAR_batch(train_filename)
        train_data.append(train_data_batch)
        train_label.append(train_label_batch)

    train_data = np.concatenate(train_data)  # 50000 x 3072 (32 * 32 * 3)
    train_label = np.concatenate(train_label).reshape(-1, 1)  # 50000 x 1

    test_filename = os.path.join(args.data_dir, "test_batch")
    test_data, test_label = load_CIFAR_batch(test_filename)
    test_label = test_label.reshape(-1, 1)

    # Generate a validation set.
    validation_data = train_data[:validation_data_size, :]
    validation_labels = train_label[:validation_data_size, :]
    train_data = train_data[validation_data_size:, :]
    train_label = train_label[validation_data_size:, :]

    # Data normalization.
    train_data = data_normalize(train_data, use_norm_shift, use_norm_scale)
    test_data = data_normalize(test_data, use_norm_shift, use_norm_scale)
    validation_data = data_normalize(validation_data, use_norm_shift,
                                     use_norm_scale)

    return train_data, train_label, validation_data, validation_labels, test_data, test_label
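
The snippet relies on a data_normalize helper that is not shown. A minimal sketch consistent with the use_norm_shift/use_norm_scale flags (the constants are assumptions, not taken from the project):

import numpy as np

def data_normalize(data, use_norm_shift=False, use_norm_scale=True):
    # Hypothetical helper matching the calls above: optionally center
    # raw [0, 255] pixel values and/or scale them into a unit range.
    data = data.astype(np.float32)
    if use_norm_shift:
        data -= 127.5  # roughly zero-center the pixel values
    if use_norm_scale:
        data /= 255.0  # scale the values down
    return data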
Example #2
def get_args(output_dir):
    return run.parse_args(
        [output_dir],
        {
            k: int(v.value) if "boolean" in v.tags else v.value
            for k, v in run_models.items() if not v.disabled
        },
    )
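
The comprehension converts values tagged "boolean" to 0/1 integers and drops disabled entries. A hypothetical run_models shape that the call assumes (the names and the SimpleNamespace stand-in are illustrative):

from types import SimpleNamespace

# Each entry exposes .value, .tags and .disabled, as the comprehension expects.
run_models = {
    "use_dropout": SimpleNamespace(value=True, tags=["boolean"], disabled=False),
    "max_depth": SimpleNamespace(value=8, tags=[], disabled=False),
    "legacy_flag": SimpleNamespace(value=1, tags=[], disabled=True),  # filtered out
}

args_dict = {
    k: int(v.value) if "boolean" in v.tags else v.value
    for k, v in run_models.items() if not v.disabled
}
# args_dict == {"use_dropout": 1, "max_depth": 8}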
Example #3
    def __init__(self):

        self.resource_dir = "../resource/"
        self.instance_dir = "../instances/"

        args = parse_args()

        args.optimizer = 'adam'
        args.loss = 'binary_crossentropy'
        args.need_char_level = True
        args.need_word_level = True
        args.word_trainable = False
        args.char_trainable = False
        args.lr = 0.001

        args.save_dir = "../saved_models/"
        args.word_emb_dir = "../instances/word_embed.txt"
        args.char_emb_dir = "../instances/char_embed.txt"
        args.r_dir = "../resource/"
        self.name = "Bi-GRU2"

        super(LiuModel1, self).__init__(args)
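
Overriding fields such as args.optimizer after parsing works because argparse returns a mutable Namespace. A minimal parse_args consistent with this usage (a sketch; the project's real option list will differ):

import argparse

def parse_args():
    # Hypothetical stand-in: the constructor above only needs a mutable
    # namespace whose defaults it can overwrite before calling super().
    parser = argparse.ArgumentParser()
    parser.add_argument("--optimizer", default="sgd")
    parser.add_argument("--lr", type=float, default=0.01)
    return parser.parse_args([])  # ignore sys.argv in this sketch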
Example #4
		# print(param.sim_times[0:step].shape)
		# exit()
		result = np.hstack((param.sim_times[0:step].reshape(-1,1), states[0:step]))
		# store in binary format
		basename = os.path.splitext(os.path.basename(instance))[0]
		folder_name = "../results/doubleintegrator/{}".format(name)
		if not os.path.exists(folder_name):
			os.mkdir(folder_name)

		output_file = "{}/{}.npy".format(folder_name, basename)
		with open(output_file, "wb") as f:
			np.save(f, result.astype(np.float32), allow_pickle=False)

if __name__ == '__main__':

	args = parse_args()
	param = DoubleIntegratorParam()
	env = DoubleIntegrator(param)

	if args.il:
		run(param, env, None, None, args)
		exit()

	controllers = {
		# 'emptywapf': Empty_Net_wAPF(param,env,torch.load('../results/doubleintegrator/exp1Empty_0/il_current.pt')),
		# 'e2e':torch.load('../results/doubleintegrator/exp1Barrier_0/il_current.pt'),
		# 'empty':torch.load('../results/doubleintegrator/exp1Empty_0/il_current.pt'),

		# 'current':torch.load(param.il_train_model_fn),
		# 'current_wapf': Empty_Net_wAPF(param,env,torch.load(param.il_train_model_fn)),
		# 'gg': GoToGoalPolicy(param,env),
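
Each run is written as a float32 .npy file whose first column holds the simulation times and whose remaining columns hold the states. Reading a result back is symmetric (the path below is illustrative only):

import numpy as np

result = np.load("../results/doubleintegrator/example/instance0.npy")
times, states = result[:, 0], result[:, 1:]  # columns mirror the hstack above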
Example #5
def test_argument_parser_accept_one_bug(self) -> None:
    """
    Tests that it is possible to give one bug and one plugin
    """
    args = run.parse_args(["success", "pbzip-2094"])
    self.assertEqual(len(args["bugs"]), 1)
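
Note that parse_args here returns a dict-like object rather than a namespace. A hypothetical parser consistent with the test (the real option names may differ):

import argparse

def parse_args(argv):
    # Sketch only: the first positional is the plugin, the rest are bugs.
    parser = argparse.ArgumentParser()
    parser.add_argument("plugin")
    parser.add_argument("bugs", nargs="*")
    return vars(parser.parse_args(argv))  # vars() turns the Namespace into a dict

# parse_args(["success", "pbzip-2094"])["bugs"] == ["pbzip-2094"]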
Example #6
"""
测试MULT中使用的ATTENTION机制的特点,是否是强变强、其它变弱?
"""
import torch

from config.config_run import Config
from data.load_data import MMDataLoader
from models.AMIO import AMIO
from run import parse_args
from trains.ATIO import ATIO

model_path = "/home/zhuchuanbo/paper_code/results/model_saves/mult-sims-M.pth"

# Configure the parameters
configs = Config(parse_args()).get_config()
device = torch.device('cuda:%d' % configs.gpu_ids[0])
configs.device = device

# Define and load the model
dataloader = MMDataLoader(configs)
model = AMIO(configs).to(device)

model.load_state_dict(torch.load(model_path))
model.eval()

atio = ATIO().get_train(configs)
results = atio.do_test(model, dataloader['test'], mode="TEST")

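Since this script only runs inference, the test pass could also be wrapped in torch.no_grad() to skip gradient bookkeeping; a usage note, not part of the original:

with torch.no_grad():
    results = atio.do_test(model, dataloader['test'], mode="TEST")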
Example #7
def test_run():
    # Test running the training for the bug model.
    run.main(run.parse_args(["--train", "--goal", "defect"]))

    # Test loading the trained model.
    run.main(run.parse_args(["--goal", "defect"]))
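
Both calls go through the same entry point. A minimal parser consistent with them (a sketch; the real project defines more options):

import argparse

def parse_args(argv):
    # Hypothetical: --train toggles training, --goal selects the model to build.
    parser = argparse.ArgumentParser()
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--goal", default="defect")
    return parser.parse_args(argv)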
Example #8
#     --viz-action custom ^
#     --viz-camera 0 ^
#     --viz-video "video_input/%myvideo%" ^
#     --viz-output "%output%".mp4 ^
#     --viz-size 6 ^
#     --output_json "%output%".json
#
#

import inference.infer_video_d2 as step2
import data.prepare_data_2d_custom as step4
import run as step5

if __name__ == "__main__":

    step2.setup_logger()

    # Parse the args only once so every step sees the same settings.
    args = step5.parse_args()

    steps: str = args.steps

    if '2' in steps:
        step2.main(args)

    if '4' in steps:
        step4.the_main_thing(args)

    if '5' in steps:
        step5.the_main_kaboose(args)
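
Because each stage is gated by a substring check, the --steps value only needs to contain the right digits, and their order does not matter (assuming parse_args defines such an option):

# Hypothetical invocations:
#   python pipeline.py --steps 245   # run stages 2, 4 and 5
#   python pipeline.py --steps 5     # only the final step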