import pathlib
import socket

import torch
import torch.multiprocessing as mp

# Options, get_exp_path, get_logger, and worker are assumed to come from the
# surrounding project.


def main():
	parser = Options()
	args = parser.parse()
	
	# experiment path
	args.path = get_exp_path(args)
	if args.resume or args.split != 'train':
		args.path = args.load_dir
	else:
		pathlib.Path(args.path).mkdir(parents=True, exist_ok=False)
		(pathlib.Path(args.path) / 'checkpoint').mkdir(parents=True, exist_ok=False)

	# find a free port for distributed init
	if args.port is None:
		with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
			s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # set before bind to take effect
			s.bind(('', 0))  # port 0 asks the OS for any free port
			args.port = int(s.getsockname()[1])

	# logger
	logger = get_logger(args.path + '/experiment_{}.log'.format(args.split))
	logger.info('Start of experiment')
	logger.info('=========== Initialized logger =============')
	logger.info('\n\t' + '\n\t'.join('%s: %s' % (k, str(v))
		for k, v in sorted(dict(vars(args)).items())))
	
	# distributed training
	args.gpus = torch.cuda.device_count()
	logger.info('Total number of gpus: %d' % args.gpus)
	mp.spawn(worker, args=(args,), nprocs=args.gpus)
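
mp.spawn calls the given worker once per GPU and passes the process rank as the first argument. A minimal sketch of the expected signature, assuming the project initializes the process group from args.port (the body here is illustrative, not the project's actual worker):

import torch
import torch.distributed as dist

def worker(rank, args):
    # rank in [0, args.gpus) is injected by mp.spawn; args is the tuple element from args=(args,)
    dist.init_process_group(
        backend='nccl',
        init_method='tcp://127.0.0.1:{}'.format(args.port),
        world_size=args.gpus,
        rank=rank)
    torch.cuda.set_device(rank)
    # ... per-process training loop ...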
Example #2
from selenium import webdriver
from options.options import Options  # project wrapper; chrome_options() returns a ChromeOptions


def open_browser(browser_type):
    # Exception handling keeps the helper robust: fall back to a default
    # Chrome driver if the requested browser cannot be started.
    # noinspection PyBroadException
    try:
        if browser_type == 'Chrome':
            driver = webdriver.Chrome(options=Options().chrome_options())
        else:
            driver = getattr(webdriver, browser_type)()
    except Exception:
        driver = webdriver.Chrome()
    return driver
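
A usage sketch ('Firefox' here assumes the matching driver binary is on PATH):

driver = open_browser('Firefox')  # falls back to Chrome on failure
driver.get('https://example.com')
driver.quit()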
Example #3
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # save log to disk
    if opt.mode == "Test":
        log_path = opt.out_dir + "/test.log"

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    logging.getLogger().setLevel(logging.INFO)

    # load train or test data
    data_loader = PoseDataLoader(opt)
    test_set = data_loader.GetTestSet()

    # load model
    model = load_model(opt)
    model.eval()

    # use cuda
    if torch.cuda.is_available():
        model = model.cuda(opt.device_ids[0])
        cudnn.benchmark = True

    # Test model
    if opt.mode == "Test":
        test(model, test_set, opt)
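
The same root-logger setup recurs in most of the examples below; a reusable helper (a sketch, not part of the original projects) could replace those blocks:

import logging

def setup_logging(log_path, fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
    # attach a file handler and a console handler to the root logger
    formatter = logging.Formatter(fmt)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    root = logging.getLogger()
    root.addHandler(fh)
    root.addHandler(ch)
    root.setLevel(logging.INFO)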
Example #4
                writer.add_scalar('train/critic_lr', lr[0], step)
                writer.add_scalar('train/actor_lr', lr[1], step)
                writer.add_scalar('train/Q', tot_Q / episode_train_times, step)
                writer.add_scalar('train/critic_loss',
                                  tot_value_loss / episode_train_times, step)
            if debug:
                print('#{}: steps:{} interval_time:{:.2f} train_time:{:.2f}'
                      .format(episode, step, train_time_interval, time.time() - time_stamp))
            time_stamp = time.time()
            # reset
            observation = None
            episode_steps = 0
            episode += 1


if __name__ == "__main__":
    opt = Options().parse()

    opt.output = get_output_folder(opt.output, "Paint")
    np.random.seed(opt.seed)
    torch.manual_seed(opt.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(opt.seed)
    random.seed(opt.seed)
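    # note: benchmark=True with deterministic=False favors speed; runs are not
    # bit-reproducible even with the seeds above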
    torch.backends.cudnn.deterministic = False
    torch.backends.cudnn.benchmark = True
    from DRL.ddpg import DDPG
    from DRL.multi import fastenv
    fenv = fastenv(opt, writer)
Example #5
def main():
    # parse options 
    op = Options()
    opt = op.parse()

    # special setting
    opt.shuffle = False
    opt.batch_size = 1
    opt.load_thread = 1

    # initialize train or test working dir
    test_dir = os.path.join(opt.classify_dir, opt.name)
    opt.model_dir = opt.dir + "/trainer_" + opt.name + "/Train/"
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)

    # save options to disk
    opt2file(opt, os.path.join(test_dir, "opt.txt"))
    
    # log setting 
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(test_dir + "/deploy.log", 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    logging.getLogger().setLevel(logging.INFO)
    
    # load label  
    if opt.label_file == "":
        opt.label_file = opt.dir + "/label.txt"
    rid2name, id2rid, rid2id = load_label(opt.label_file)
    num_classes = [len(rid2name[index])-2 for index in range(len(rid2name))]
        
    # load transformer
    transformer = get_transformer(opt) 

    # load model
    model = load_model(opt, num_classes)
    model.eval()
    
    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        cudnn.benchmark = True
    
    out_file = open(test_dir + "/classify_res_data.txt", 'w')
    with open(opt.classify_dir + "/data.txt") as data:
        for num, line in enumerate(data):
            logging.info(str(num+1))
            line = json.loads(line)
            input_tensor = load_image(line["image_file"], line["box"], opt, transformer) 
            input_tensor = input_tensor.unsqueeze(0)
            if opt.cuda:
                input_tensor = input_tensor.cuda(opt.devices[0])
            with torch.no_grad():  # inference only; replaces the legacy Variable(..., volatile=True) API
                outputs = model(input_tensor)
            if not isinstance(outputs, list):
                outputs = [outputs]
            line["classify_res"] = list() 
            for index, out in enumerate(outputs):
                out = out.cpu()
                softmax = F.softmax(out, dim=1).squeeze()
                probs, ids = softmax.sort(0, True)
                classify_res = {}
                for i in range(len(probs)):
                    # .item() yields plain Python numbers, keeping the dict JSON-serializable
                    classify_res[rid2name[index][id2rid[index][ids[i].item()]]] = probs[i].item()
                classify_res["max_score"] = probs[0].item()
                classify_res["best_label"] = rid2name[index][id2rid[index][ids[0].item()]]
                line["classify_res"].append(classify_res)
            out_file.write(json.dumps(line, separators=(',', ':')) + '\n')
    out_file.close()
    logging.info("classification done")
Example #6
import os
from netdissect import pidfile
from options.options import Options
opt = Options().parse()

def get_imgs():
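    # base_name is a module-level name defined below; Python resolves it when get_imgs() is called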
    img_nums = sorted([int(f.strip().split(f'{base_name}_')[1].split('.')[0]) for f in os.listdir(opt.source)])
    file_names = [f'{base_name}_{num}.png' for num in img_nums]
    return img_nums, file_names


N = 100
start_imgnum = int(opt.imgnum) * N
base_name = os.path.basename(opt.source)
pid_file = os.path.join(opt.results_dir, base_name, f'pid_{opt.imgnum}')
print('pidfile', pid_file)

def main():
    import numpy as np
    from models import vgg19_model
    from algorithms import neural_best_buddies as NBBs
    from util import util
    from tqdm import tqdm
    from util import MLS

    vgg19 = vgg19_model.define_Vgg19(opt)
    img_nums, images = get_imgs()

    for imgnum in tqdm(range(start_imgnum, start_imgnum + N)):
        save_dir = os.path.join(opt.results_dir, str(img_nums[imgnum]))
        print('Working on', images[imgnum])
Example #7
import os
import time
from tqdm import tqdm

from options.options import Options
from models import resnet_model
from datasets import create_dataset
from utils.visualizer import Visualizer

if __name__ == '__main__':
    opt = Options().parse_args()  # get training options

    dataset = create_dataset(opt)

    model = resnet_model.ResnetModel(opt)

    visualizer = Visualizer(opt)  # create a visualizer that displays/saves images and plots

    total_iters = 0

    for epoch in range(opt.num_epoch):

        epoch_start_time = time.time()  # timer for entire epoch
        iter_data_time = time.time()  # timer for data loading per iteration
        epoch_iter = 0  # the number of training iterations in current epoch, reset to 0 every epoch

        for i, data in enumerate(dataset):  # inner loop within one epoch

            iter_start_time = time.time()  # timer for computation per iteration
Example #8
def main():
    # parse options 
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    trainer_dir = "trainer_" + opt.name
    opt.model_dir = os.path.join(opt.dir, trainer_dir, "Train") 
    opt.data_dir = os.path.join(opt.dir, trainer_dir, "Data") 
    opt.test_dir = os.path.join(opt.dir, trainer_dir, "Test") 
    
    if not os.path.exists(opt.data_dir):
        os.makedirs(opt.data_dir)
    if opt.mode == "Train":
        if not os.path.exists(opt.model_dir):        
            os.makedirs(opt.model_dir)
        log_dir = opt.model_dir 
        log_path = log_dir + "/train.log"
    if opt.mode == "Test":
        if not os.path.exists(opt.test_dir):
            os.makedirs(opt.test_dir)
        log_dir = opt.test_dir
        log_path = log_dir + "/test.log"

    # save options to disk
    util.opt2file(opt, log_dir+"/opt.txt")
    
    # log setting 
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)
    
    # load train or test data
    data_loader = MultiLabelDataLoader(opt)
    if opt.mode == "Train":
        train_set = data_loader.GetTrainSet()
        val_set = data_loader.GetValSet()
    elif opt.mode == "Test":
        test_set = data_loader.GetTestSet()

    num_classes = data_loader.GetNumClasses()
    rid2name = data_loader.GetRID2Name()
    id2rid = data_loader.GetID2RID()
    opt.class_num = len(num_classes)

    # load model
    model = load_model(opt, num_classes)

    # define loss function
    criterion = nn.CrossEntropyLoss(weight=opt.loss_weight) 
    
    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion = criterion.cuda(opt.devices[0])
        cudnn.benchmark = True
    
    # Train model
    if opt.mode == "Train":
        train(model, criterion, train_set, val_set, opt, (rid2name, id2rid))
    # Test model
    elif opt.mode == "Test":
        test(model, criterion, test_set, opt)
Example #9
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    trainer_dir = "trainer_" + opt.name
    opt.model_dir = os.path.join(opt.dir, trainer_dir, "Train")
    opt.data_dir = os.path.join(opt.dir, trainer_dir, "Data")
    opt.test_dir = os.path.join(opt.dir, trainer_dir, "Test")

    if not os.path.exists(opt.data_dir):
        os.makedirs(opt.data_dir)
    if opt.mode == "Train":
        if not os.path.exists(opt.model_dir):
            os.makedirs(opt.model_dir)
        log_dir = opt.model_dir
        log_path = log_dir + "/train.log"
    if opt.mode == "Test":
        if not os.path.exists(opt.test_dir):
            os.makedirs(opt.test_dir)
        log_dir = opt.test_dir
        log_path = log_dir + "/train_Epoch2.log"

    # save options to disk
    util.opt2file(opt, log_dir + "/opt.txt")

    # log setting
    #log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    log_format = '%(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)

    # load train or test data
    data_loader = MultiLabelDataLoader(opt)
    if opt.mode == "Train":
        train_set = data_loader.GetTrainSet()
        val_set = data_loader.GetValSet()
    elif opt.mode == "Test":
        test_set = data_loader.GetTestSet()

    num_classes = data_loader.GetNumClasses()
    rid2name = data_loader.GetRID2Name()
    id2rid = data_loader.GetID2RID()
    opt.class_num = len(num_classes)

    # load model
    model = load_model(opt, num_classes)

    # define loss function
    criterion = nn.CrossEntropyLoss(weight=opt.loss_weight)

    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion = criterion.cuda(opt.devices[0])
        cudnn.benchmark = True

    # Train model
    if opt.mode == "Train":
        train(model, criterion, train_set, val_set, opt, (rid2name, id2rid))
    elif opt.mode == "Test-Train":
        train(model, criterion, test_set, val_set, opt, (rid2name, id2rid))
    # Test model
    elif opt.mode == "Test":
        test(model, criterion, test_set, opt)
Example #10
import os
from tqdm import tqdm
from torchvision import utils

from renderer.face_model import FaceModel
from options.options import Options
from utils.util import create_dir, load_coef

if __name__ == '__main__':
    opt = Options().parse_args()

    create_dir(os.path.join(opt.src_dir, 'reenact'))

    alpha_list = load_coef(os.path.join(opt.tgt_dir, 'alpha'), opt.test_num)
    beta_list = load_coef(os.path.join(opt.tgt_dir, 'beta'), opt.test_num)
    delta_list = load_coef(os.path.join(opt.src_dir, 'reenact_delta'),
                           opt.test_num)
    gamma_list = load_coef(os.path.join(opt.tgt_dir, 'gamma'), opt.test_num)
    angle_list = load_coef(os.path.join(opt.tgt_dir, 'rotation'), opt.test_num)
    translation_list = load_coef(os.path.join(opt.tgt_dir, 'translation'),
                                 opt.test_num)

    face_model = FaceModel(data_path=opt.matlab_data_path, batch_size=1)

    for i in tqdm(range(len(delta_list))):
        alpha = alpha_list[i + opt.offset].unsqueeze(0).cuda()
        beta = beta_list[i + opt.offset].unsqueeze(0).cuda()
        delta = delta_list[i].unsqueeze(0).cuda()
        gamma = gamma_list[i + opt.offset].unsqueeze(0).cuda()
        rotation = angle_list[i + opt.offset].unsqueeze(0).cuda()
        translation = translation_list[i + opt.offset].unsqueeze(0).cuda()
Example #11
# -*- coding: utf-8 -*-
# Author: calm_zn
# Date: 2021/3/25 17:14
# Tool: PyCharm
# Python version: 3.7

from selenium import webdriver
from options.options import Options

driver = webdriver.Chrome(options=Options().chrome_options())
driver.get('https://www.100.nxdev.cn/')
driver.implicitly_wait(10)
driver.find_element('xpath', '/html/body/div/div[1]/div/div[2]').click()
driver.find_element()
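
The dangling find_element() call above is missing its locator; in Selenium 4 the method takes a strategy and a value, e.g.:

from selenium.webdriver.common.by import By

element = driver.find_element(By.XPATH, '/html/body/div/div[1]/div/div[2]')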
Example #12
import os
from tqdm import tqdm

from options.options import Options
from datasets import create_dataset
from models import audio_expression_model
from utils.util import create_dir

if __name__ == '__main__':
    opt = Options().parse_args()  # get training options
    # hard-code some parameters for test
    opt.isTrain = False
    opt.num_threads = 0  # test code only supports num_threads = 0
    opt.batch_size = 1  # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.

    create_dir(os.path.join(opt.data_dir, 'reenact_delta'))

    dataset = create_dataset(opt)

    model = audio_expression_model.AudioExpressionModel(opt)
    model.load_network()
    model.eval()

    for i, data in enumerate(tqdm(dataset)):
        model.set_input(data)  # unpack data from data loader
        model.test()  # run inference
        model.save_delta()

        if i >= opt.test_num - 1:
            break
Example #13
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    trainer_dir = "trainer_" + opt.name
    opt.model_dir = os.path.join(opt.dir, trainer_dir, "Train")
    opt.data_dir = os.path.join(opt.dir, trainer_dir, "Data")
    opt.test_dir = os.path.join(opt.dir, trainer_dir, "Test")

    if not os.path.exists(opt.data_dir):
        os.makedirs(opt.data_dir)
    if opt.mode == "Train":
        if not os.path.exists(opt.model_dir):
            os.makedirs(opt.model_dir)
        log_dir = opt.model_dir
        log_path = log_dir + "/train.log"
    if opt.mode == "Test":
        if not os.path.exists(opt.test_dir):
            os.makedirs(opt.test_dir)
        log_dir = opt.test_dir
        log_path = log_dir + "/test.log"

    # save options to disk
    util.opt2file(opt, log_dir + "/opt.txt")

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)

    # load train or test data
    ds = DeepFashionDataset(opt)
    num_data = len(ds)
    indices = list(range(num_data))
    split = int((opt.ratio[1] + opt.ratio[2]) * num_data)
    validation_Test_idx = np.random.choice(indices, size=split, replace=False)
    train_idx = list(set(indices) - set(validation_Test_idx))
    train_sampler = SubsetRandomSampler(train_idx)
    # validation Set
    split = int(round(0.5 * len(validation_Test_idx)))
    validation_idx = np.random.choice(validation_Test_idx, size=split, replace=False)
    validation_sampler = SubsetRandomSampler(validation_idx)
    # Test set
    test_idx = list(set(validation_Test_idx) - set(validation_idx))
    test_sampler = SubsetRandomSampler(test_idx)

    train_set = DataLoader(ds, batch_size=opt.batch_size, shuffle=False, sampler=train_sampler)
    val_set = DataLoader(ds, batch_size=opt.batch_size, shuffle=False, sampler=validation_sampler)
    test_set = DataLoader(ds, batch_size=opt.batch_size, shuffle=False, sampler=test_sampler)

    num_classes = [opt.numctg, opt.numattri]  # number of classes per head: category, then attributes
    opt.class_num = len(num_classes)

    # load model
    model = load_model(opt, num_classes)

    # define loss function
    criterion_softmax = nn.CrossEntropyLoss(weight=opt.loss_weight)
    criterion_binary = torch.nn.BCELoss()

    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion_softmax = criterion_softmax.cuda(opt.devices[0])
        criterion_binary = criterion_binary.cuda(opt.devices[0])
        cudnn.benchmark = True

    # Train model
    if opt.mode == "Train":
        train(model, criterion_softmax, criterion_binary, train_set, val_set, opt)
    # Test model
    elif opt.mode == "Test":
        test(model, criterion_softmax, criterion_binary, test_set, opt)
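
A toy sketch (num_data and the 0.3 ratio are made up) verifying that the np.random.choice split above yields disjoint train/val/test index sets that cover the dataset:

import numpy as np

num_data = 100
indices = list(range(num_data))
split = int(0.3 * num_data)  # stand-in for opt.ratio[1] + opt.ratio[2]
val_test_idx = np.random.choice(indices, size=split, replace=False)
train_idx = list(set(indices) - set(val_test_idx))
val_idx = np.random.choice(val_test_idx, size=int(round(0.5 * len(val_test_idx))), replace=False)
test_idx = list(set(val_test_idx) - set(val_idx))

assert set(train_idx).isdisjoint(val_idx) and set(val_idx).isdisjoint(test_idx)
assert len(train_idx) + len(val_idx) + len(test_idx) == num_data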
Example #14
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # save log to disk
    if opt.mode == "Train":
        log_path = opt.out_dir + "/train.log"

    # save options to disk
    util.opt2file(opt, opt.out_dir + "/opt.txt")

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)

    # load train or test data
    data_loader = PoseDataLoader(opt)
    if opt.mode == "Train":
        train_set = data_loader.GetTrainSet()
        val_set = data_loader.GetValSet()

    # load model
    model = load_model(opt)

    # define loss function
    criterion = JointsMSELoss(opt)

    # define optimizer
    if opt.optim == 'Adam':
        optimizer = optim.Adam(model.parameters(),
                               opt.lr,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=opt.weight_decay,
                               amsgrad=False)
    else:
        optimizer = optim.SGD(model.parameters(),
                              opt.lr,
                              momentum=opt.momentum,
                              weight_decay=opt.weight_decay)
    # define learning rate scheduler
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=opt.lr_decay_in_epoch,
                                          gamma=opt.gamma)

    # use cuda
    if len(opt.device_ids) == 1:
        model = model.cuda(opt.device_ids[0])
        cudnn.benchmark = True
    elif len(opt.device_ids) > 1:
        model = nn.DataParallel(model.cuda(opt.device_ids[0]),
                                device_ids=opt.device_ids)
        cudnn.benchmark = True

    # Train model
    if opt.mode == "Train":
        train(model, criterion, train_set, val_set, optimizer, scheduler, opt)
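
A self-contained sketch (toy optimizer; step_size=30 and gamma=0.1 are assumed values) of how the StepLR schedule above decays the learning rate:

import torch
from torch import optim

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = optim.SGD(params, lr=0.1)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
for epoch in range(90):
    optimizer.step()
    scheduler.step()
    # lr during epoch e is 0.1 * 0.1 ** (e // 30): 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for 60-89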
Example #15
def main():

    print("parse opt...")

    # parse options
    op = Options()
    opt = op.parse()

    # initialize train or test working dir
    opt.model_dir = os.path.join("results", opt.name)
    logging.info("Model directory: %s" % opt.model_dir)

    if not os.path.exists(opt.model_dir):
        os.makedirs(opt.model_dir)
    log_dir = opt.model_dir
    log_path = log_dir + "/train.log"

    # save options to disk
    util.opt2file(opt, log_dir + "/opt.txt")

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(log_path, 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    log_level = logging.INFO
    logging.getLogger().setLevel(log_level)
    '''
    pkl_file = "%s/metadata.pkl" % opt.data_dir
    if not os.path.exists(pkl_file):
        # If metadata file does not exist, manually create it
        # from the txt files and save a pkl.
        filenames, attrs = get_list_attr_img(opt.data_dir)
        categories = get_list_category_img(opt.data_dir)
        with open(pkl_file, "wb") as f:
            pickle.dump({'filenames': filenames,
                         'attrs': attrs,
                         'categories': categories}, f)
    else:
        logging.info("Found %s..." % pkl_file)
        with open(pkl_file, "rb") as f:
            dat = pickle.load(f)
            filenames = dat['filenames']
            attrs = dat['attrs']
            categories = dat['categories']
    '''

    attrs = get_list_attr_img(opt.data_dir)
    categories = get_list_category_img(opt.data_dir)
    bboxes = get_bboxes(opt.data_dir)

    indices = list(range(len(attrs.keys())))
    rnd_state = np.random.RandomState(0)
    rnd_state.shuffle(indices)
    train_idx = indices[0:int(0.9 * len(indices))]
    valid_idx = indices[int(0.9 * len(indices)):int(0.95 * len(indices))]
    test_idx = indices[int(0.95 * len(indices))::]

    # Define datasets.
    ds_train = DeepFashionDataset(root=opt.data_dir,
                                  indices=train_idx,
                                  attrs=attrs,
                                  categories=categories,
                                  bboxes=bboxes,
                                  data_aug=opt.data_aug,
                                  img_size=opt.img_size,
                                  crop_size=opt.crop_size)
    ds_valid = DeepFashionDataset(root=opt.data_dir,
                                  indices=valid_idx,
                                  attrs=attrs,
                                  categories=categories,
                                  bboxes=bboxes,
                                  data_aug=opt.data_aug,
                                  img_size=opt.img_size,
                                  crop_size=opt.crop_size)
    '''
    ds_test = DeepFashionDataset(root=opt.data_dir,
                                 indices=test_idx,
                                 img_size=opt.img_size,
                                 crop_size=opt.crop_size)
    '''
    # Define data loaders.
    loader_train = DataLoader(ds_train,
                              shuffle=True,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers)
    loader_valid = DataLoader(ds_valid,
                              shuffle=False,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers)
    '''
    loader_test = DataLoader(ds_train,
                             shuffle=False,
                             batch_size=opt.batch_size,
                             num_workers=1)
    '''

    # load model
    model = FashionResnet(50, 1000, opt.resnet_type)
    logging.info(model)

    if opt.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=opt.lr, eps=opt.eps)
    else:
        optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9)

    # load existing model
    last_epoch = 0
    if opt.resume is not None:
        if opt.resume == 'auto':
            import glob
            # List all the pkl files.
            files = glob.glob("%s/*.pth" % opt.model_dir)
            # Make them absolute paths.
            files = [os.path.abspath(key) for key in files]
            if len(files) > 0:
                # Get creation time and use that.
                latest_chkpt = max(files, key=os.path.getctime)
                logging.info("Auto-resume mode found latest checkpoint: %s" %
                             latest_chkpt)
                last_epoch = load_model(model,
                                        latest_chkpt,
                                        optimizer,
                                        devices=opt.devices)
        else:
            logging.info("Loading checkpoint: %s" % opt.resume)
            last_epoch = load_model(model,
                                    opt.resume,
                                    optimizer,
                                    devices=opt.devices)

    # Weight_attribute = get_weight_attr_img(opt)
    # print(len(Weight_attribute))

    # define loss function
    criterion_softmax = nn.CrossEntropyLoss()  # weight=opt.loss_weight
    if opt.loss == 'bce':
        if opt.pos_weights:
            logging.info("Using pos_weights...")
            pos_weights = (1 - attrs).sum(dim=0) / attrs.sum(dim=0)
            # Scale pos_weights such that its maximum value will be == pos_weights_scale.
            # This is in case pos_weights has too big of a range.
            pos_weights = pos_weights / (pos_weights.max() /
                                         opt.pos_weights_scale)
            criterion_binary = torch.nn.BCEWithLogitsLoss(
                pos_weight=pos_weights, reduction='none')
        else:
            criterion_binary = torch.nn.BCEWithLogitsLoss(reduction='none')
    else:
        if opt.pos_weights:
            raise Exception("`pos_weights` only works with BCE loss!")
        criterion_binary = HingeLoss()

    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        criterion_softmax = criterion_softmax.cuda(opt.devices[0])
        criterion_binary = criterion_binary.cuda(opt.devices[0])

    # float16
    if opt.fp16:
        if not amp_imported:
            raise Exception("Was not able to import apex library. "
                            "This is required for float16 mode.")
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          enabled=True,
                                          opt_level='O1')

    # Train model
    if opt.mode == "train":
        logging.info("Running in train mode")
        train(model=model,
              optimizer=optimizer,
              criterion_softmax=criterion_softmax,
              criterion_binary=criterion_binary,
              train_loader=loader_train,
              val_loader=loader_valid,
              opt=opt,
              epoch=last_epoch)
    # Test model
    elif opt.mode == "validate":
        logging.info("Running in validate mode")
        accs = forward_dataset(model, criterion_softmax, criterion_binary,
                               loader_valid, opt)
        for key in accs:
            print("%s --> %.4f +/- %.4f" %
                  (key, np.mean(accs[key]), np.std(accs[key])))
    elif opt.mode == "test":
        logging.info("Running in test mode")
        ds_test = DeepFashionDataset(root=opt.data_dir,
                                     indices=test_idx,
                                     attrs=attrs,
                                     categories=categories,
                                     bboxes=bboxes,
                                     img_size=opt.img_size,
                                     crop_size=opt.crop_size)
        loader_test = DataLoader(ds_test,
                                 shuffle=False,
                                 batch_size=opt.batch_size,
                                 num_workers=opt.num_workers)
        accs = forward_dataset(model, criterion_softmax, criterion_binary,
                               loader_test, opt)
        for key in accs:
            print("%s --> %.4f +/- %.4f" %
                  (key, np.mean(accs[key]), np.std(accs[key])))
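
A small numeric sketch (toy data, not from the project) of the pos_weights scaling above: rarer positive labels get larger weights, and the vector is then rescaled so its maximum equals pos_weights_scale:

import torch

attrs = torch.tensor([[1., 0., 0.],
                      [1., 1., 0.],
                      [1., 0., 0.],
                      [1., 1., 1.]])  # 4 samples x 3 binary attributes
pos_weights_scale = 10.0

pos_weights = (1 - attrs).sum(dim=0) / attrs.sum(dim=0)  # neg/pos ratio per attribute: [0., 1., 3.]
pos_weights = pos_weights / (pos_weights.max() / pos_weights_scale)
print(pos_weights)  # tensor([ 0.0000,  3.3333, 10.0000])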
Example #16
from options.options import Options
args = Options().parse()

import json
import numpy as np
import os
import neural_renderer as nr
# Used in diff_render.py

# TODO: remove this once I fix the environment
import torch


# This is the code used to help the neural renderer
def load_json(json_file):
    with open(json_file, 'r') as f:
        var = json.load(f)
    return var


import pywavefront as pwf
# For loading meshes

import pymesh
# For re-meshing objects

suncg_obj_dir = os.path.join(args.suncg_data_dir, "object")
suncg_room_dir = os.path.join(args.suncg_data_dir, "room")

suncg_valid_types = load_json("metadata/valid_types.json")
Example #17
import os
import numpy as np
from models import vgg19_model
from algorithms import neural_best_buddies as NBBs
from util import util
from util import MLS
from util import order as ORDER
import warnings
from numpy import median
from options.options import Options

opt = Options().parse()
vgg19 = vgg19_model.define_Vgg19(opt)
save_dir = os.path.join(opt.results_dir, opt.name)
input_dir = os.path.join(opt.input_dir, opt.name)
nbbs = NBBs.sparse_semantic_correspondence(vgg19, opt.gpu_ids, opt.tau,
                                           opt.border_size, save_dir,
                                           opt.k_per_level, opt.k_final,
                                           opt.fast)
image_count, source_index, target_index = util.arrangeSourceDir(
    input_dir, opt.sourceImg, opt.targetImg)
# -----create new matrix for the ordering of the pics ----------------
matrix = ORDER.create_matrix(image_count)
points_matrix = util.create_points_matrix(image_count)
#---------------------------------------------------------------------
data_file_path = save_dir + '/distance_data.dat'
point_file_path = save_dir + '/points_matrix_data.dat'
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    if opt.contiue_calc or opt.data_stored:  # note: "contiue_calc" spelling matches the project's option name
        print("LOADED DATA:")
Example #18
def main():
    _options = Options(sys.argv)
    _cmdparser = CmdLine()
    _cmdparser.parse(_options)

    tasks.runAllTasks(_options)
Example #19
import os
import cv2
import numpy as np
from util.pytorch_utils import generate_rect_mask, generate_stroke_mask
from model.pytorch.net import InpaintingModel_GMCNN
from options.options import Options

path_in = 'imgs/celebahq_256x256/'
path_out = 'results/celebahq_256x256/'
path_dataset = 'model-celeb-256-rect/model-celeb-256-rect.pth'  # pretrained GMCNN checkpoint

images = os.listdir(path_in)

config = Options().parse()

model = InpaintingModel_GMCNN(in_channels=4, opt=config)
model.load_networks(path_dataset)

for img_file in images:
    image = cv2.imread(path_in + img_file)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    h, w, c = image.shape
    mask, _ = generate_rect_mask(im_size=[h, w, c], mask_size=[128, 128])
    # mask = generate_stroke_mask(im_size=[h, w, c])

    image = np.transpose(image, [2, 0, 1])
    image = np.expand_dims(image, axis=0)

    input_img = np.transpose(image[0][::-1, :, :], [1, 2, 0])  # RGB -> BGR, CHW -> HWC for visualization

    image_vis = image * (1 - mask) + 255 * mask  # fill the masked hole region with white
Example #20
def main():
    # parse options
    op = Options()
    opt = op.parse()

    # special setting
    opt.shuffle = False
    opt.batch_size = 1
    opt.load_thread = 1

    # initialize train or test working dir
    test_dir = os.path.join(opt.classify_dir, opt.name)
    opt.model_dir = opt.dir + "/trainer_" + opt.name + "/Train/"
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)

    # save options to disk
    opt2file(opt, os.path.join(test_dir, "opt.txt"))

    # log setting
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    formatter = logging.Formatter(log_format)
    fh = logging.FileHandler(test_dir + "/deploy.log", 'a')
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logging.getLogger().addHandler(fh)
    logging.getLogger().addHandler(ch)
    logging.getLogger().setLevel(logging.INFO)

    # load label
    if opt.label_file == "":
        opt.label_file = opt.dir + "/label.txt"
    rid2name, id2rid, rid2id = load_label(opt.label_file)
    num_classes = [len(rid2name[index]) - 2 for index in range(len(rid2name))]

    # load transformer
    transformer = get_transformer(opt)

    # load model
    model = load_model(opt, num_classes)
    model.eval()

    # use cuda
    if opt.cuda:
        model = model.cuda(opt.devices[0])
        cudnn.benchmark = True

    out_file = open(test_dir + "/classify_res_data.txt", 'w')
    with open(opt.classify_dir + "/data.txt") as data:
        for num, line in enumerate(data):
            logging.info(str(num + 1))
            line = json.loads(line)
            input_tensor = load_image(line["image_file"], line["box"], opt,
                                      transformer)
            input_tensor = input_tensor.unsqueeze(0)
            if opt.cuda:
                input_tensor = input_tensor.cuda(opt.devices[0])
            with torch.no_grad():  # inference only; replaces the legacy Variable(..., volatile=True) API
                outputs = model(input_tensor)
            if not isinstance(outputs, list):
                outputs = [outputs]
            line["classify_res"] = list()
            for index, out in enumerate(outputs):
                out = out.cpu()
                softmax = F.softmax(out, dim=1).squeeze()
                probs, ids = softmax.sort(0, True)
                classify_res = {}
                for i in range(len(probs)):
                    # .item() yields plain Python numbers, keeping the dict JSON-serializable
                    classify_res[rid2name[index][id2rid[index][ids[i].item()]]] = probs[i].item()
                classify_res["max_score"] = probs[0].item()
                classify_res["best_label"] = rid2name[index][id2rid[index][ids[0].item()]]
                line["classify_res"].append(classify_res)
            out_file.write(json.dumps(line, separators=(',', ':')) + '\n')
    out_file.close()
    logging.info("classification done")