Exemplo n.º 1
0
def main():
    """Fine-tune the question-generation model with REINFORCE.

    Loads CLI flags, evaluates the initial policy, runs sampled-reward
    training epochs, keeps the checkpoint with the best success rate, and
    appends the final success rate to the shared results file.
    """
    parser = rl_looper_arguments()
    args, unknown = parser.parse_known_args()
    args = vars(args)
    save_path = os.path.join("../out/qgen", args["qgen_name"], args["name"])
    # makedirs(..., exist_ok=True) creates missing parent directories and
    # avoids the TOCTOU race of the exists()+mkdir pattern; os.mkdir would
    # fail if "../out/qgen/<qgen_name>" did not already exist.
    os.makedirs(save_path, exist_ok=True)
    log_path = os.path.join(save_path, "rl_train.log")
    logger = create_logger(log_path, "w")
    # Which RL objectives are active, inferred from the task string.
    args["cate_rl"] = "cls" in args["rl_task"]
    args["gen_rl"] = "gen" in args["rl_task"]
    logger.info(args)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_dir = "./../data"
    looper = Looper(data_dir, args, device, logger)
    optimizer = torch.optim.Adam(looper.qgen_model.parameters(), lr=args["lr"])

    # Baseline evaluation before any RL updates; dump per-game outcomes.
    success_rate, failed, success = looper.eval()
    with open(os.path.join(save_path, "rl_fail_start.json"), mode="w") as f:
        json.dump(failed, f, indent=2)
    with open(os.path.join(save_path, "rl_success_start.json"), mode="w") as f:
        json.dump(success, f, indent=2)
    best_success_rate = success_rate
    logger.info("Init success_rate: {:.3f}%".format(success_rate * 100))
    failed_file = os.path.join(save_path, "rl_fail.json")
    success_file = os.path.join(save_path, "rl_success.json")
    # NOTE(review): range(1, epoch) runs epoch-1 training iterations —
    # confirm this off-by-one is intended before changing it.
    for epoch in range(1, args["epoch"]):
        reward_loss = looper.rl_sample_reward_epoch(optimizer)
        print("epoch: {}, reward_loss: {:.4f}".format(epoch, reward_loss))
        if epoch % 2 == 0:  # evaluate every second epoch only
            success_rate, failed, success = looper.eval()
            with open(failed_file, mode="w") as f:
                json.dump(failed, f)
            with open(success_file, mode="w") as f:
                json.dump(success, f)
            logger.info("epoch: {}, success_rate: {:.3f}%".format(epoch, success_rate*100))
            # Keep only the best-performing checkpoint.
            if success_rate > best_success_rate:
                best_success_rate = success_rate
                torch.save(
                    looper.qgen_model.state_dict(),
                    os.path.join("../out/qgen", args["qgen_name"], "rl_model.bin"))
    # Final evaluation of the model state after the last training epoch.
    success_rate, failed, success = looper.eval()
    with open(failed_file, mode="w") as f:
        json.dump(failed, f)
    with open(success_file, mode="w") as f:
        json.dump(success, f)
    # Merge this run's result into the shared cross-model results file.
    result_file = os.path.join("../out/games", "test.json")
    turns = "{}turns".format(args["max_turn"])
    models_name = ",".join(["reinforce", turns, args["qgen_name"], args["oracle_name"], args["guesser_name"]])
    print("model_name: ", models_name)
    res = {}
    if os.path.exists(result_file):
        with open(result_file, mode="r") as f:
            res = json.load(f)
    res[models_name] = round(success_rate, 4)
    with open(result_file, mode="w") as f:
        json.dump(res, f, indent=2)
Exemplo n.º 2
0
import torch
import torch.nn as nn
from torch.utils import data
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision.utils as vutils
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter

from dataset.LIP_dataset_RGB import LIPDataSet
print('RGB dataset')
# NOTE(review): `config` is not imported in this chunk — presumably bound
# earlier in the file; verify before reusing this snippet standalone.
print(config.NUM_WORKERS)
# from networks.CDinkNet_ASPP import CDinkNet_ASPP

# -------------------------pre---------------------------------------
# Logger plus output/visualization directories, then a TensorBoard writer.
logger, out_dir, vis_dir = create_logger(config)
logger.info('initalize logger succesiful.')
writer = SummaryWriter(vis_dir)

cudnn.enabled = True
# cudnn related setting
# benchmark=True lets cuDNN autotune conv algorithms (fastest for fixed
# input sizes); deterministic=False trades reproducibility for speed.
cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.enabled = True
# GPU ids come as a comma-separated string, e.g. "0,1".
gpus = [int(i) for i in config.GPUS.split(',')]
# ------------------------got data and model------------------------------------------
# Input size is "W,H"; heatmaps are quarter resolution — presumably the
# network's output stride is 4, TODO confirm against the model definition.
w, h = map(int, config.DATA.INPUT_SIZE.split(','))
input_size = [w, h]
heatmap_size = [w // 4, h // 4]
# Standard ImageNet channel statistics for RGB normalization.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
Exemplo n.º 3
0
def main():
    """Train the encoder with the DIM loss, evaluating and checkpointing
    once per epoch (evaluation happens before each training pass)."""
    global args, best_nmi, start_epoch

    os.makedirs('{}'.format(args.save_path), exist_ok=True)

    # Logging: text logger plus a TensorBoard writer in the save directory.
    logger = create_logger('global_logger', log_file=os.path.join(args.save_path,'log.txt'))
    logger.info('{}'.format(args))
    logger.info('{}'.format(coeff))
    tb_logger = SummaryWriter(args.save_path)

    # Networks: encoder backbone, then the DIM loss head sized via a dry
    # run of a dummy batch through the encoder.
    model = models.__dict__[args.arch](args.num_classes).cuda()
    print("=> created encoder '{}'".format(args.arch))

    probe_batch = torch.zeros([5, 3, args.input_size, args.input_size]).cuda()
    arch_dims = get_dim(model, probe_batch, args.layers, args.c_layer)

    dim_loss = models.__dict__['DIM_Loss'](arch_dims).cuda()

    # Optimize only trainable parameters, drawn from both modules.
    trainable = itertools.chain(
        filter(lambda p: p.requires_grad, model.parameters()),
        filter(lambda p: p.requires_grad, dim_loss.parameters()))
    optimizer = torch.optim.RMSprop(trainable, lr=args.lr, alpha=0.9)

    # Loss criterions for the graph, label, and classification terms.
    crit_graph = nn.BCELoss().cuda()
    crit_label = WeightedBCE().cuda()
    crit_c = nn.CrossEntropyLoss().cuda()

    # Optionally restore model/loss/optimizer state from a checkpoint.
    if args.resume:
        logger.info("=> loading checkpoint '{}'".format(args.resume))
        start_epoch, best_nmi = load_checkpoint(model, dim_loss, optimizer, args.resume)

    # Data: raw tensors from disk; augmentation is applied separately by
    # the generator during training.
    train_set = McDataset(
        args.root,
        args.source,
        transform=transforms.ToTensor())
    loader = torch.utils.data.DataLoader(
        train_set, batch_size=args.large_bs,
        num_workers=args.workers, pin_memory=True, shuffle=True)
    augmenter = ImageDataGenerator(
        rotation_range=20,
        width_shift_range=0.18,
        height_shift_range=0.18,
        channel_shift_range=0.1,
        horizontal_flip=True,
        rescale=0.95,
        zoom_range=[0.85,1.15])

    for epoch in range(start_epoch, args.epochs):

        end = time.time()

        # Evaluate clustering quality before this epoch's training pass.
        nmi, acc, ari = test(loader, model, epoch, tb_logger)

        # Checkpoint, tracking the best NMI observed so far.
        is_best_nmi = nmi > best_nmi
        best_nmi = max(nmi, best_nmi)
        save_checkpoint({
            'epoch': epoch,
            'model': model.state_dict(),
            'dim_loss': dim_loss.state_dict(),
            'best_nmi': best_nmi,
            'optimizer': optimizer.state_dict()},
            is_best_nmi, args.save_path + '/ckpt')

        # One training pass over the data.
        train(loader, model, dim_loss, crit_label, crit_graph, crit_c, optimizer, epoch, augmenter, tb_logger)
Exemplo n.º 4
0
            args["object"] = True
            args["image_arch"] = "rcnn"
            args["image_dim"] = 2048
        with open(save_path.format("args.json"), mode="w") as f:
            json.dump(args, f, indent=2, ensure_ascii=False)
        logger.info(args)
        model = QGenNetwork(args, tokenizer, device).to(device)
        train_loader, val_loader = prepare_dataset(data_dir, "train", args,
                                                   tokenizer)
        train(model, args, train_loader, val_loader, param_file)
    else:
        with open(save_path.format("args.json"), mode="r") as f:
            saved_args = json.load(f)
            saved_args["option"] = "test"
        args = saved_args
        logger.info(args)
        model = QGenNetwork(args, tokenizer, device).to(device)
        testloader = prepare_dataset(data_dir, "test", args, tokenizer)
        test(model, args, testloader, param_file)


if __name__ == "__main__":
    # Entry point: parse known flags (tolerating unknown CLI options),
    # prepare the model output directory, set up logging, and run.
    parser = qgen_arguments()
    flags, unknown = parser.parse_known_args()
    flags = vars(flags)
    model_dir = "./../out/qgen/" + flags["name"]
    # exist_ok=True replaces the race-prone exists()-then-makedirs
    # conditional expression (which was used purely for side effects)
    # and also creates any missing parent directories.
    os.makedirs(model_dir, exist_ok=True)
    save_path = model_dir + "/{}"
    logger = create_logger(save_path.format('train.log'), "w")
    main(flags)