def main(args):
    """Train and evaluate a logistic-regression probe on a frozen SimCLR encoder.

    Loads the YAML config named by ``args.config`` (with optional CLI
    overrides for the data directory and model checkpoint), extracts
    features with the pre-trained SimCLR model, trains a linear classifier
    on those features, and logs train/test performance to file and
    TensorBoard.

    Raises:
        FileNotFoundError: if the config file, model directory, or model
            checkpoint file does not exist.
    """
    # Validate the path BEFORE opening it, so the user gets the intended
    # FileNotFoundError instead of a raw OSError from open().
    if not os.path.exists(args.config):
        raise FileNotFoundError('provided config file does not exist: %s' % args.config)
    with open(args.config, 'r') as config_file:
        config_yaml = yaml.load(config_file, Loader=yaml.FullLoader)

    # Older configs may omit this key; default it so SimCLRConfig can rely on it.
    config_yaml['simclr']['train'].setdefault('restart_log_dir_path', None)

    if args.data_dir_path is not None:
        config_yaml['simclr']['train']['data_dir_path'] = args.data_dir_path

    config_yaml['logger_name'] = 'logreg'
    config = SimCLRConfig(config_yaml)

    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(config.base.output_dir_path, exist_ok=True)
    os.makedirs(config.base.log_dir_path, exist_ok=True)

    logger = setup_logger(config.base.logger_name, config.base.log_file_path)
    logger.info('using config: %s' % config)

    # Keep a copy of the config next to the logs for reproducibility.
    config_copy_file_path = os.path.join(config.base.log_dir_path, 'config.yaml')
    shutil.copy(args.config, config_copy_file_path)

    writer = SummaryWriter(log_dir=config.base.log_dir_path)

    if not os.path.exists(args.model):
        raise FileNotFoundError('provided model directory does not exist: %s' % args.model)
    logger.info('using model directory: %s' % args.model)

    config.logistic_regression.model_path = args.model
    logger.info('using model_path: {}'.format(config.logistic_regression.model_path))

    config.logistic_regression.epoch_num = args.epoch_num
    logger.info('using epoch_num: {}'.format(config.logistic_regression.epoch_num))

    # str() guards against epoch_num having been parsed as an int by argparse.
    model_file_path = Path(config.logistic_regression.model_path).joinpath(
        'checkpoint_' + str(config.logistic_regression.epoch_num) + '.pth')
    if not os.path.exists(model_file_path):
        raise FileNotFoundError('model file does not exist: %s' % model_file_path)
    logger.info('using model file: %s' % model_file_path)

    train_dataset, val_dataset, test_dataset, classes = Datasets.get_datasets(
        config, img_size=config.logistic_regression.img_size)
    num_classes = len(classes)

    train_loader, val_loader, test_loader = Datasets.get_loaders(
        config, train_dataset, val_dataset, test_dataset)

    # Frozen encoder: only used to produce features, never trained here.
    simclr_model = load_simclr_model(config)
    simclr_model = simclr_model.to(config.base.device)
    simclr_model.eval()

    model = LogisticRegression(simclr_model.num_features, num_classes)
    model = model.to(config.base.device)

    learning_rate = config.logistic_regression.learning_rate
    momentum = config.logistic_regression.momentum
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate,
                                momentum=momentum, nesterov=True)
    criterion = torch.nn.CrossEntropyLoss()

    logger.info("creating features from pre-trained context model")
    (train_x, train_y, test_x, test_y) = get_features(
        config, simclr_model, train_loader, test_loader
    )

    feature_train_loader, feature_test_loader = get_data_loaders(
        config, train_x, train_y, test_x, test_y
    )

    # Track the best epoch by training accuracy.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    best_epoch = 0
    best_loss = 0

    for epoch in range(config.logistic_regression.epochs):
        loss_epoch, accuracy_epoch = train(
            config, feature_train_loader, model, criterion, optimizer
        )

        # NOTE(review): averages divide by len(train_loader) while training
        # iterates feature_train_loader; the two are assumed to have the
        # same batch count -- confirm upstream.
        loss = loss_epoch / len(train_loader)
        accuracy = accuracy_epoch / len(train_loader)

        writer.add_scalar("Loss/train_epoch", loss, epoch)
        writer.add_scalar("Accuracy/train_epoch", accuracy, epoch)
        logger.info(
            "epoch [%3.i|%i] -> train loss: %f, accuracy: %f" % (
                epoch + 1, config.logistic_regression.epochs, loss, accuracy)
        )

        if accuracy > best_acc:
            best_loss = loss
            best_epoch = epoch + 1
            best_acc = accuracy
            best_model_wts = copy.deepcopy(model.state_dict())

    # Restore the best-performing weights before final evaluation.
    model.load_state_dict(best_model_wts)
    logger.info(
        "train dataset performance -> best epoch: {}, loss: {}, accuracy: {}".format(
            best_epoch, best_loss, best_acc)
    )

    loss_epoch, accuracy_epoch = test(
        config, feature_test_loader, model, criterion
    )

    loss = loss_epoch / len(test_loader)
    accuracy = accuracy_epoch / len(test_loader)
    logger.info(
        "test dataset performance -> best epoch: {}, loss: {}, accuracy: {}".format(
            best_epoch, loss, accuracy)
    )
Example #2
0
    # Set class names in config file based on IMDB
    class_names = imdb.classes
    cfg_from_list(['CLASS_NAMES', [class_names]])

    if args.alpha:
        cfg_from_list(['LRP_HAI.ALPHA', True])

    # Update config to match start of training detector
    cfg_from_list(['LRP_HAI_TRAIN.DET_START', args.det_start])

    # output directory where the models are saved
    output_dir = get_output_dir(imdb, args.tag, args.save_path)

    logger = setup_logger("LRP-HAI",
                          save_dir=args.save_path,
                          filename="log_train.txt")
    logger.info('Called with args:')
    logger.info(args)
    logger.info('Using attention alpha:')
    logger.info(cfg.LRP_HAI.ALPHA)
    logger.info('Using config:\n{}'.format(pprint.pformat(cfg)))
    logger.info('{:d} roidb entries'.format(len(roidb)))
    logger.info('Output will be saved to `{:s}`'.format(output_dir))

    # also add the validation set, but with no flipping images
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    _, valroidb = combined_roidb(args.imdbval_name)
    logger.info('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip
Example #3
0
def main():
    """Invert a list of real images into StyleGAN latent codes.

    Loads a pickled (encoder ``E``, generator ``Gs``) pair, initializes each
    image's latent code from the encoder output (or randomly), then optimizes
    the code against pixel + perceptual (+ optional encoder-domain) losses.
    Writes per-image PNGs, ``.npy`` code arrays, and an HTML visualization
    into the output directory.
    """
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    assert os.path.exists(args.image_list)
    image_list_name = os.path.splitext(os.path.basename(args.image_list))[0]
    output_dir = args.output_dir or f'results/inversion/{image_list_name}'
    logger = setup_logger(output_dir, 'inversion.log', 'inversion_logger')

    logger.info(f'Loading model.')
    tflib.init_tf({'rnd.np_random_seed': 1000})
    # Pickle layout: encoder E, two unused networks, synthesis network Gs.
    with open(args.model_path, 'rb') as f:
        E, _, _, Gs = pickle.load(f)

    # Get input size.
    image_size = E.input_shape[2]
    assert image_size == E.input_shape[3]  # encoder expects square inputs

    # Build graph.
    logger.info(f'Building graph.')
    sess = tf.get_default_session()
    input_shape = E.input_shape
    input_shape[0] = args.batch_size  # pin the batch dimension
    x = tf.placeholder(tf.float32, shape=input_shape, name='real_image')
    # Map [-1, 1] NCHW tensors to [0, 255] NHWC for the perceptual model.
    x_255 = (tf.transpose(x, [0, 2, 3, 1]) + 1) / 2 * 255
    latent_shape = Gs.components.synthesis.input_shape
    latent_shape[0] = args.batch_size
    # wp is the optimization variable: the per-image latent code.
    wp = tf.get_variable(shape=latent_shape, name='latent_code')
    x_rec = Gs.components.synthesis.get_output_for(wp, randomize_noise=False)
    x_rec_255 = (tf.transpose(x_rec, [0, 2, 3, 1]) + 1) / 2 * 255
    if args.random_init:
        logger.info(f'  Use random initialization for optimization.')
        wp_rnd = tf.random.normal(shape=latent_shape, name='latent_code_init')
        setter = tf.assign(wp, wp_rnd)
    else:
        logger.info(
            f'  Use encoder output as the initialization for optimization.')
        w_enc = E.get_output_for(x, is_training=False)
        wp_enc = tf.reshape(w_enc, latent_shape)
        setter = tf.assign(wp, wp_enc)

    # Settings for optimization: pixel + perceptual losses, optionally an
    # encoder-domain regularizer keeping wp near the encoder's manifold.
    logger.info(f'Setting configuration for optimization.')
    perceptual_model = PerceptualModel([image_size, image_size], False)
    x_feat = perceptual_model(x_255)
    x_rec_feat = perceptual_model(x_rec_255)
    loss_feat = tf.reduce_mean(tf.square(x_feat - x_rec_feat), axis=[1])
    loss_pix = tf.reduce_mean(tf.square(x - x_rec), axis=[1, 2, 3])
    if args.domain_regularizer:
        logger.info(f'  Involve encoder for optimization.')
        w_enc_new = E.get_output_for(x_rec, is_training=False)
        wp_enc_new = tf.reshape(w_enc_new, latent_shape)
        loss_enc = tf.reduce_mean(tf.square(wp - wp_enc_new), axis=[1, 2])
    else:
        logger.info(f'  Do NOT involve encoder for optimization.')
        loss_enc = 0
    loss = (loss_pix + args.loss_weight_feat * loss_feat +
            args.loss_weight_enc * loss_enc)
    optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
    # Only the latent code is trainable; network weights stay frozen.
    train_op = optimizer.minimize(loss, var_list=[wp])
    tflib.init_uninitialized_vars()

    # Load image list.
    logger.info(f'Loading image list.')
    image_list = []
    with open(args.image_list, 'r') as f:
        for line in f:
            image_list.append(line.strip())

    # Invert images.
    logger.info(f'Start inversion.')
    save_interval = args.num_iterations // args.num_results
    headers = ['Name', 'Original Image', 'Encoder Output']
    # NOTE(review): `step` is reused for the optimization loop further down;
    # here it only enumerates which iterations get a snapshot column.
    for step in range(1, args.num_iterations + 1):
        if step == args.num_iterations or step % save_interval == 0:
            headers.append(f'Step {step:06d}')
    viz_size = None if args.viz_size == 0 else args.viz_size
    visualizer = HtmlPageVisualizer(num_rows=len(image_list),
                                    num_cols=len(headers),
                                    viz_size=viz_size)
    visualizer.set_headers(headers)

    images = np.zeros(input_shape, np.uint8)  # batch buffer, reused per batch
    names = ['' for _ in range(args.batch_size)]
    latent_codes_enc = []  # codes straight from the encoder (pre-optimization)
    latent_codes = []      # codes after optimization
    for img_idx in tqdm(range(0, len(image_list), args.batch_size),
                        leave=False):
        # Load inputs.
        batch = image_list[img_idx:img_idx + args.batch_size]
        for i, image_path in enumerate(batch):
            image = resize_image(load_image(image_path),
                                 (image_size, image_size))
            images[i] = np.transpose(image, [2, 0, 1])  # HWC -> CHW
            names[i] = os.path.splitext(os.path.basename(image_path))[0]
        # Scale uint8 [0, 255] to float [-1, 1] as the graph expects.
        inputs = images.astype(np.float32) / 255 * 2.0 - 1.0
        # Run encoder (or random init) to seed the latent variable.
        sess.run([setter], {x: inputs})
        outputs = sess.run([wp, x_rec])
        # Trim to len(batch): the final batch may be smaller than batch_size.
        latent_codes_enc.append(outputs[0][0:len(batch)])
        outputs[1] = adjust_pixel_range(outputs[1])
        for i, _ in enumerate(batch):
            image = np.transpose(images[i], [1, 2, 0])
            save_image(f'{output_dir}/{names[i]}_ori.png', image)
            save_image(f'{output_dir}/{names[i]}_enc.png', outputs[1][i])
            visualizer.set_cell(i + img_idx, 0, text=names[i])
            visualizer.set_cell(i + img_idx, 1, image=image)
            visualizer.set_cell(i + img_idx, 2, image=outputs[1][i])
        # Optimize latent codes.
        col_idx = 3
        for step in tqdm(range(1, args.num_iterations + 1), leave=False):
            sess.run(train_op, {x: inputs})
            if step == args.num_iterations or step % save_interval == 0:
                outputs = sess.run([wp, x_rec])
                outputs[1] = adjust_pixel_range(outputs[1])
                for i, _ in enumerate(batch):
                    if step == args.num_iterations:
                        save_image(f'{output_dir}/{names[i]}_inv.png',
                                   outputs[1][i])
                    visualizer.set_cell(i + img_idx,
                                        col_idx,
                                        image=outputs[1][i])
                col_idx += 1
        # `outputs` still holds the final-iteration codes from the loop above.
        latent_codes.append(outputs[0][0:len(batch)])

    # Save results.
    # NOTE(review): `cp` via os.system is shell-dependent and unquoted;
    # shutil.copy would be more portable -- confirm before changing.
    os.system(f'cp {args.image_list} {output_dir}/image_list.txt')
    np.save(f'{output_dir}/encoded_codes.npy',
            np.concatenate(latent_codes_enc, axis=0))
    np.save(f'{output_dir}/inverted_codes.npy',
            np.concatenate(latent_codes, axis=0))
    visualizer.save(f'{output_dir}/inversion.html')
Example #4
0
from tempfile import NamedTemporaryFile
from ansible import callbacks
from ansible import utils

from tempfile import NamedTemporaryFile
import ConfigParser
import jinja2
import sys
import os

# Read the coonfig file
CONFIG = ConfigParser.ConfigParser()
CONFIG.read('cloud.ini')

# Simple logger
LOGGER = logger.setup_logger("new_instance")

# Ansible loggers, courtesy of https://serversforhackers.com/running-ansible-programmatically
utils.VERBOSITY = 1
playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
stats = callbacks.AggregateStats()
runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)

# check_pubkey, if it does not exist will create an "ansible" key
instances.check_pubkey(CONFIG.get("instances", "key_name"),
                       CONFIG.get("instances", "pub_key_path"))

# Let's create the server
server = instances.create_server('%s' % (sys.argv[1]),
                                 CONFIG.get("instances", "flavor"),
                                 CONFIG.get("instances", "image"),
Example #5
0
def main():
    """Train StereoNet end-to-end, then evaluate on the held-out split.

    Reads every hyper-parameter from the module-level ``args`` namespace
    and mutates ``args.start_epoch`` as a side effect when resuming from
    a checkpoint.
    """
    global args
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # Gather the image/disparity file lists and sort each one so that
    # left/right/disparity entries stay aligned and runs are deterministic.
    (train_left_img, train_right_img, train_left_disp,
     test_left_img, test_right_img, test_left_disp) = lt.dataloader(
        args.datapath)
    for file_list in (train_left_img, train_right_img, train_left_disp,
                      test_left_img, test_right_img, test_left_disp):
        file_list.sort()

    # Identity normalization: keep pixel values unchanged.
    norm_stats = {'mean': [0.0, 0.0, 0.0], 'std': [1.0, 1.0, 1.0]}
    train_img_loader = torch.utils.data.DataLoader(
        DA.myImageFloder(train_left_img, train_right_img, train_left_disp,
                         True, normalize=norm_stats),
        batch_size=args.train_bsize,
        shuffle=False,
        num_workers=1,
        drop_last=False)
    test_img_loader = torch.utils.data.DataLoader(
        DA.myImageFloder(test_left_img, test_right_img, test_left_disp,
                         False, normalize=norm_stats),
        batch_size=args.test_bsize,
        shuffle=False,
        num_workers=4,
        drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + '/training.log')
    # Echo every CLI option into the log for reproducibility.
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ':' + str(value))

    model = nn.DataParallel(StereoNet(k=args.stages - 1,
                                      r=args.stages - 1,
                                      maxdisp=args.maxdisp)).cuda()
    model.apply(weights_init)
    print('init with normal')

    optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=args.stepsize,
                                    gamma=args.gamma)

    log.info('Number of model parameters: {}'.format(
        sum(p.data.nelement() for p in model.parameters())))

    args.start_epoch = 0

    # Optionally restore model/optimizer state from a saved checkpoint.
    if not args.resume:
        log.info("Not Resume")
    elif os.path.isfile(args.resume):
        log.info("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        log.info("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
    else:
        log.info("=> no checkpoint found at '{}'".format(args.resume))
        log.info("=> will start from scratch.")

    start_full_time = time.time()
    for epoch in range(args.start_epoch, args.epoch):
        log.info('This is {}-th epoch'.format(epoch))

        train(train_img_loader, model, optimizer, log, epoch)

        # Overwrite a single rolling checkpoint each epoch.
        checkpoint_path = args.save_path + '/checkpoint.pth'
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, checkpoint_path)
        scheduler.step()  # will adjust learning rate

    test(test_img_loader, model, log)
    log.info('full training time = {: 2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
Example #6
0
                        default=3.0,
                        help='End distance for manipulation. (default: 3.0)')
    parser.add_argument('--manipulate_layers',
                        type=str,
                        default='6-11',
                        help='Indices of the layers to perform manipulation. '
                        'Active ONLY when `layerwise_manipulation` is set '
                        'as `True`. If not specified, all layers will be '
                        'manipulated. More than one layers should be '
                        'separated by `,`. (default: None)')
    args = parser.parse_args()

    work_dir = 'manipulation_results'
    os.makedirs(work_dir, exist_ok=True)
    prefix = f'{args.model_name}_{args.boundary_name}'
    logger = setup_logger(work_dir, '', 'logger')

    logger.info(f'Initializing generator.')
    model = build_generator(args.model_name, logger=logger)

    logger.info(f'Preparing latent codes.')
    if os.path.isfile(args.latent_codes_path):
        logger.info(f'  Load latent codes from `{args.latent_codes_path}`.')
        latent_codes = np.load(args.latent_codes_path)
        latent_codes = model.preprocess(
            latent_codes=latent_codes,
            latent_space_type=args.latent_space_type)
    else:
        logger.info(f'  Sample latent codes randomly.')
        latent_codes = model.easy_sample(
            num=args.num, latent_space_type=args.latent_space_type)
Example #7
0
import re
import shutil
from collections import defaultdict

from requests_html import HTML

from crawler.requestHandler import RequestHandler
from utils.logger import setup_logger

logger = setup_logger()


def call_func(fun):
    """Decorator: tag *fun* with ``is_callable = True`` and return it unchanged."""
    setattr(fun, 'is_callable', True)
    return fun


class Metadata(defaultdict):
    """Dot-notation dictionary with automatic nested access.

    Missing keys materialize as empty nested ``Metadata`` children, so
    chained lookups like ``m.a.b`` never raise.  Existing plain ``dict``
    objects are NOT converted recursively.
    """

    def __init__(self):
        # Every missing key is backed by a fresh nested Metadata instance.
        super().__init__(Metadata)

    def __getattr__(self, key):
        # Attribute access falls through to item access.  With the default
        # factory in place, __missing__ builds a nested Metadata; without
        # one, an absent key degrades to an empty string.
        if self.default_factory is not None:
            return self[key]
        return self[key] if key in self else ""
def main():
    """Extract ReID appearance features for detected boxes.

    Parses CLI options and the yacs config, then (when --fresh_feats is
    given) runs the extractor over every detection crop under each
    requested test-time augmentation (multi-scale resize, vertical flip,
    center-crop) and saves both the concatenated ('_cat') and averaged
    ('_mean') feature matrices as .npy files.
    """
    # load config
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("--imgs_path",
                        default="",
                        help="path to images file",
                        type=str)
    parser.add_argument("--det_fname",
                        default="",
                        help="path to det file",
                        type=str)
    parser.add_argument("--save_feats_fname",
                        default="",
                        help="shortname of image",
                        type=str)
    parser.add_argument("--fresh_feats",
                        default=False,
                        help="whether to refresh feat",
                        action='store_true')
    parser.add_argument("--aug_ms",
                        default=False,
                        help="whether to aug",
                        action='store_true')
    parser.add_argument("--aug_flip",
                        default=False,
                        help="whether to aug",
                        action='store_true')
    parser.add_argument("--aug_centercrop",
                        default=False,
                        help="whether to aug",
                        action='store_true')

    # Remaining tokens are forwarded verbatim as config overrides.
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # WORLD_SIZE is injected by distributed launchers; default to 1 GPU.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()  # config is immutable from here on

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("feature extract", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    print("Running with config:\n{}".format(cfg))
    # ------------------- extractor feature
    # Recompute features only when explicitly requested via --fresh_feats.
    if args.fresh_feats:
        extractor = Extractor(cfg)

        with open(args.det_fname, 'r', encoding='utf-8') as fid:
            det_boxes = json.load(fid)

        # One crop file per detection entry; paths are built by plain
        # string concatenation, so imgs_path must end with a separator.
        img_fnames = [args.imgs_path + _x['save_filename'] for _x in det_boxes]
        rescales = [1.0]
        flip_num = 1
        crop_scales = [1.0]
        if args.aug_ms:
            rescales = [0.7, 1.0, 1.4]
        if args.aug_flip:
            flip_num = 2
        if args.aug_centercrop:
            crop_scales = [1.0, 0.7]
        aug_features = []
        # Every flip x crop x scale combination runs the full dataset once.
        for i in range(flip_num):
            for crop_scale in crop_scales:
                for rescale in rescales:
                    # build transform
                    # NOTE(review): normalize_transform and (h, w) are
                    # loop-invariant; rebuilt each pass (harmless but wasteful).
                    normalize_transform = T.Normalize(
                        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
                    h, w = cfg.INPUT.SIZE_TEST
                    if i == 0:
                        # Un-flipped branch.
                        transform = T.Compose([
                            T.Resize((int(h * rescale), int(w * rescale)),
                                     interpolation=Image.LANCZOS),
                            T.CenterCrop(
                                (int(h * crop_scale), int(w * crop_scale))),
                            T.ToTensor(), normalize_transform
                        ])
                    else:
                        # Flipped branch: p=1.1 (> 1) presumably forces the
                        # flip on every sample -- verify against torchvision.
                        transform = T.Compose([
                            T.Resize((int(h * rescale), int(w * rescale)),
                                     interpolation=Image.LANCZOS),  #
                            T.CenterCrop(
                                (int(h * crop_scale), int(w * crop_scale))),
                            T.RandomVerticalFlip(
                                1.1),  # T.RandomHorizontalFlip(1.1),
                            T.ToTensor(),
                            normalize_transform
                        ])
                    logger.info(transform)
                    image_set = ImageDataset(img_fnames, transform)
                    # Sequential sampling keeps feature rows aligned with
                    # det_boxes order across augmentation passes.
                    image_loader = DataLoader(
                        image_set,
                        sampler=SequentialSampler(image_set),
                        batch_size=cfg.TEST.IMS_PER_BATCH,
                        num_workers=4)
                    features = []
                    with tqdm(total=len(image_loader)) as pbar:
                        for idx, batchs in enumerate(image_loader):
                            features.append(
                                extractor.apply_batch(
                                    batchs.cuda()).cpu().numpy())
                            pbar.update(1)
                    features = np.vstack(features)  #N,F
                    aug_features.append(features)

        # Concatenated variant: one wide row per detection (N, F * runs).
        features = np.hstack(aug_features)
        np.save(
            os.path.join(output_dir,
                         args.save_feats_fname.replace('.npy', '_cat.npy')),
            features)

        # Averaged variant: element-wise mean over all augmentation runs.
        features = aug_features[0]
        for i in range(1, len(aug_features)):
            features += aug_features[i]
        features /= len(aug_features)

        np.save(
            os.path.join(output_dir,
                         args.save_feats_fname.replace('.npy', '_mean.npy')),
            features)
Example #9
0
            pred_s = model(images)

            loss = criterion(pred_s, gts)

            loss.backward()
            clip_gradient(optimizer, opt.clip)
            optimizer.step()
            scheduler.step()
            if rate == 1:
                loss_record.update(loss.data, opt.batchsize)

        if i % 100 == 0 or i == len(train_loader):
            logger.info(
                'Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss: {:.4f}'.
                format(epoch, opt.epochs, i, len(train_loader),
                       loss_record.show()))


if __name__ == '__main__':
    # Parse CLI options and echo them for the console record.
    opt = parse_option()
    print(opt)
    os.makedirs(opt.output_dir, exist_ok=True)

    # File logger plus a JSON dump of the full option set, so the run is
    # reproducible from its output directory alone.
    logger = setup_logger(output=opt.output_dir, name="rd3d")
    path = os.path.join(opt.output_dir, "config.json")
    with open(path, 'w') as f:
        json.dump(vars(opt), f, indent=2)
    logger.info("Full config saved to {}".format(path))

    ckpt_path = main(opt)
Example #10
0
import os
import logging

from utils import logger

# Setup the logger
log = logger.setup_logger(__name__, logging.WARNING,
                          logger.defaultLoggingHandler())


def get_image(image):
    """Resolve a file path to its absolute form.

    Arguments:
        image (str) -- location of the file

    Returns the absolute path when the file exists; otherwise logs a
    critical message and returns ``None``.
    """
    if not os.path.isfile(image):
        log.critical("Unable to find image %s, exiting...", image)
        return None
    log.debug("Found image %s", image)
    return os.path.abspath(image)


def get_dir_imgs(img_dir):
    """
	Get a list of all images in a directory

	Arguments:
		img_dir (str) -- the directory where the images are stored
	"""
Example #11
0
def _render_interpolations(model, gan_type, kwargs, interpolations,
                           output_dir, sample_id):
    """Synthesize every interpolated latent code and save the frames.

    Images are written to ``output_dir`` as ``<sample>_<frame>.jpg``.
    The generator returns RGB arrays; channels are reversed
    (``[:, :, ::-1]``) because ``cv2.imwrite`` expects BGR ordering.
    """
    interpolation_id = 0
    for interpolations_batch in model.get_batch_inputs(interpolations):
        if gan_type == 'pggan':
            outputs = model.easy_synthesize(interpolations_batch)
        elif gan_type == 'stylegan':
            outputs = model.easy_synthesize(interpolations_batch, **kwargs)
        for image in outputs['image']:
            save_path = os.path.join(
                output_dir,
                f'{sample_id:03d}_{interpolation_id:03d}.jpg')
            cv2.imwrite(save_path, image[:, :, ::-1])
            interpolation_id += 1


def main():
    """Edit GAN latent codes along a learned boundary and render the results.

    Supports three tasks (attribute / head_pose / landmark) and, for the
    attribute task, three interpolation methods.  For each input sample a
    sequence of edited frames is synthesized and written as JPEGs to the
    task-specific output directory, together with the boundary and the
    latent codes used.
    """
    args = parse_args()

    # append task to the output dir path
    args.output_dir = os.path.join(args.output_dir, args.task)
    # Ensure the output directory exists before np.save/cv2.imwrite below;
    # exist_ok avoids racing with any creation done by setup_logger.
    os.makedirs(args.output_dir, exist_ok=True)

    logger = setup_logger(args.output_dir, logger_name='generate_data')

    logger.info('Initializing generator.')
    gan_type = MODEL_POOL[args.model_name]['gan_type']
    if gan_type == 'pggan':
        model = PGGANGenerator(args.model_name, logger)
        kwargs = {}
    elif gan_type == 'stylegan':
        model = StyleGANGenerator(args.model_name, logger)
        kwargs = {'latent_space_type': args.latent_space_type}
    else:
        raise NotImplementedError(f'Not implemented GAN type `{gan_type}`!')

    logger.info('Preparing boundary.')
    args.boundary_path = process_bound_path(gan_type, args)
    if not os.path.isfile(args.boundary_path):
        raise ValueError(f'Boundary `{args.boundary_path}` does not exist!')
    boundary = np.load(args.boundary_path)
    np.save(os.path.join(args.output_dir, 'boundary.npy'), boundary)

    logger.info('Preparing latent codes.')
    if args.demo:
        demo_code(gan_type, args)

    if os.path.isfile(args.input_latent_codes_path):
        logger.info(
            f'  Load latent codes from `{args.input_latent_codes_path}`.')
        latent_codes = np.load(args.input_latent_codes_path)
        print(latent_codes.shape)
        if len(latent_codes) > 1:
            # Keep only the first code so a single sample is edited.
            latent_codes = np.expand_dims(latent_codes[0], axis=0)
        latent_codes = model.preprocess(latent_codes, **kwargs)
    else:
        logger.info(f'  Sample latent codes randomly.')
        latent_codes = model.easy_sample(args.num, **kwargs)
    np.save(os.path.join(args.output_dir, 'latent_codes.npy'), latent_codes)
    total_num = latent_codes.shape[0]

    logger.info(f'Editing {total_num} samples.')
    for sample_id in tqdm(range(total_num), leave=False):
        attr_index = args.attr_index
        if args.task == 'attribute':
            if args.method == 'interfacegan':
                # baseline modification from initial point
                interpolations = my_linear_interpolate(
                    latent_codes[sample_id:sample_id + 1],
                    attr_index,
                    boundary,
                    'linear',
                    steps=args.steps,
                    gan_type=gan_type,
                    step_size=args.step_size)
            elif args.method == 'linear':
                # linear baseline attribute modification
                starting_latent_code = latent_codes[sample_id:sample_id +
                                                    1].reshape(1, -1)
                interpolations = my_linear_interpolate(
                    starting_latent_code,
                    attr_index,
                    boundary,
                    'static_linear',
                    steps=args.steps,
                    condition=args.condition,
                    gan_type=gan_type,
                    step_size=args.step_size)
            elif args.method == 'ours':
                # piecewise-linear attribute modification
                starting_latent_code = latent_codes[sample_id:sample_id +
                                                    1].reshape(1, -1)
                interpolations = my_linear_interpolate(
                    starting_latent_code,
                    attr_index,
                    boundary,
                    'piecewise_linear',
                    steps=args.steps,
                    condition=args.condition,
                    gan_type=gan_type,
                    step_size=args.step_size)
            else:
                # Unknown method: nothing to render (matches original no-op).
                continue
            _render_interpolations(model, gan_type, kwargs, interpolations,
                                   args.output_dir, sample_id)

        elif args.task == 'head_pose':
            # pose modification
            starting_latent_code = latent_codes[sample_id:sample_id +
                                                1].reshape(1, -1)
            interpolations = my_linear_interpolate(starting_latent_code,
                                                   attr_index,
                                                   boundary,
                                                   'pose_edit',
                                                   steps=args.steps,
                                                   condition=args.condition,
                                                   gan_type=gan_type,
                                                   step_size=args.step_size,
                                                   direction=args.direction)
            _render_interpolations(model, gan_type, kwargs, interpolations,
                                   args.output_dir, sample_id)

        elif args.task == 'landmark':
            # landmark modification
            # NOTE: gan_type is deliberately not forwarded here, mirroring
            # the original call signature for this branch.
            starting_latent_code = latent_codes[sample_id:sample_id +
                                                1].reshape(1, -1)
            interpolations = my_linear_interpolate(starting_latent_code,
                                                   attr_index,
                                                   boundary,
                                                   'piecewise_linear',
                                                   steps=args.steps,
                                                   is_landmark=True,
                                                   condition=args.condition,
                                                   step_size=args.step_size,
                                                   direction=args.direction)
            _render_interpolations(model, gan_type, kwargs, interpolations,
                                   args.output_dir, sample_id)
import io
import pycurl
from utils.logger import setup_logger
import stem.process
import random

# Module-level logger for the Tor connection helper.
logger = setup_logger("connection")

# Local port on which the Tor SOCKS proxy is expected to listen.
SOCKS_PORT = 7000

EXIT_NODES = [
    {
        "code": "{gb}",
        "country": "United Kingdom"
    },
    {
        "code": "{us}",
        "country": "United States"
    },
    {
        "code": "{sg}",
        "country": "Singapore"
    },
    {
        "code": "{my}",
        "country": "Malaysia"
    },
    {
        "code": "{id}",
        "country": "Indonesia"
    },
Example #13
0
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.spice.spice import Spice
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer

from utils.logger import setup_logger

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="evaluate")
    parser.add_argument("--gt_caption", type=str)
    parser.add_argument("--pd_caption", type=str)
    parser.add_argument("--save_dir", type=str)
    args = parser.parse_args()

    logger = setup_logger("evaluate", args.save_dir, 0)
    ptb_tokenizer = PTBTokenizer()

    scorers = [(Cider(), "C"), (Spice(), "S"),
               (Bleu(4), ["B1", "B2", "B3", "B4"]),
               (Meteor(), "M"), (Rouge(), "R")]

    logger.info(f"loading ground-truths from {args.gt_caption}")
    with open(args.gt_caption) as f:
        gt_captions = json.load(f)
    gt_captions = ptb_tokenizer.tokenize(gt_captions)

    logger.info(f"loading predictions from {args.pd_caption}")
    with open(args.pd_caption) as f:
        pred_dict = json.load(f)
    pd_captions = dict()
Example #14
0
import os
import sys
from config.cfg import Cfg
import torch
from torch.backends import cudnn

sys.path.append('.')
from datasets import make_dataloader
from processor import do_inference
from model import make_model
from utils.logger import setup_logger

if __name__ == "__main__":
    # Inference entry point: freeze the config, restore the trained
    # generator weights and evaluate on the validation loader.
    Cfg.freeze()
    logger = setup_logger('pose-transfer-avs.test', Cfg.DATALOADER.LOG_DIR)
    logger.info("Running with config:\n{}".format(Cfg))

    # Pin the visible GPUs and let cuDNN pick its fastest kernels.
    os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    # Only the validation loader is needed at test time.
    _, val_loader = make_dataloader(Cfg)
    generator, _, _ = make_model(Cfg)
    generator.load_state_dict(torch.load(Cfg.TEST.WEIGHT))

    do_inference(Cfg, generator, val_loader)
Example #15
0
import sys
from utils.argsparser import parse_args
from utils.logger import setup_logger, gen_log_filename

# fabric requires paramiko, which in turn require threading, so patching
# the threading module as well which is skipped in (https://github.com/Juniper/
# contrail-sandesh/blob/master/library/python/pysandesh/sandesh_http.py#L19)
# Without the patch in threading, due to incompatability between unpatched
# threading and patched other modules, ssh dosent go through.
from gevent import monkey
monkey.patch_thread()

# Parse CLI options (skipping the program name); the first positional
# argument names the plugin to run and also determines the log file name.
args = parse_args(sys.argv[1:])
log_file = gen_log_filename(sys.argv[1])
log = setup_logger(log_file)


def main():
    """Dynamically load the plugin named by the first CLI argument and run it.

    Plugins live under ``contraildebug.plugins``; each must expose a
    ``main(args)`` entry point, which receives the module-level parsed args.
    """
    plugin_name = sys.argv[1]
    plugin = __import__('contraildebug.plugins.%s' % plugin_name,
                        fromlist='_')
    plugin.main(args)


# Script entry point: run the selected plugin and propagate its exit status.
if __name__ == '__main__':
    sys.exit(main())
def train(cfg):
    """Build model, optimizer and LR scheduler from *cfg* and launch training.

    Supported ``cfg.MODEL.PRETRAIN_CHOICE`` values:
      * ``'resume'``             -- restore model + optimizer state from a
                                    checkpoint whose filename encodes the
                                    epoch (e.g. ``model_40.pth``).
      * ``'self'``/``'imagenet'`` -- load pretrained weights, start at epoch 0
                                    and additionally build a camera model.

    Raises:
        ValueError: for any other pretrain choice.
    """
    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # prepare dataset
    val_data_loader, num_query = make_val_data_loader(cfg)
    # One class count per sampler source; initialised to -1 (= unset).
    num_classes = np.zeros(len(cfg.DATALOADER.SAMPLER_PROB)).astype(int) - 1
    source_dataset = init_dataset(cfg.SRC_DATA.NAMES,
                                  root_train=cfg.SRC_DATA.TRAIN_DIR,
                                  transfered=cfg.SRC_DATA.TRANSFERED)
    num_classes[0] = source_dataset.num_train_pids
    num_classes[1] = cfg.TGT_UNSUPDATA.CLUSTER_TOPK
    if cfg.MODEL.FINETUNE:
        # Extra headroom for classes discovered during finetuning.
        num_classes[1] += 200

    # prepare model
    model = build_model(cfg, num_classes)

    optimizer, fixed_lr_idxs = make_optimizer(cfg, model)
    loss_fn = make_loss(cfg, num_classes)

    # Fix: camera_model was previously only assigned on the 'self'/'imagenet'
    # path, so the 'resume' path crashed with a NameError at do_train().
    # NOTE(review): do_train now receives None on resume — confirm it
    # tolerates a missing camera model.
    camera_model = None

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'resume':
        # The epoch number is the last '_'-separated token of the file stem.
        # Fix: parse it with int() instead of eval() (safer, same result for
        # a numeric suffix).
        start_epoch = int(
            cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')
            [-1])
        logger.info('Start epoch:%d' % start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
            'model', 'optimizer')
        logger.info('Path to the checkpoint of optimizer:%s' %
                    path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch,
                                      fixed_lr_idxs)
    elif cfg.MODEL.PRETRAIN_CHOICE in ('self', 'imagenet'):
        start_epoch = 0
        model.load_param(cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.PRETRAIN_CHOICE)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, -1,
                                      fixed_lr_idxs)
        camera_model = build_camera_model(cfg, num_classes=5)
        camera_model.load_param(cfg.TEST.CAMERA_WEIGHT,
                                cfg.MODEL.PRETRAIN_CHOICE)
    else:
        # Fix: previously this branch only logged and then fell through to
        # do_train(), crashing with a NameError on the undefined start_epoch
        # and scheduler. Fail loudly instead.
        raise ValueError(
            'Only support pretrain_choice for imagenet and self, but got {}'.
            format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(
        cfg,
        model,
        camera_model,
        val_data_loader,
        optimizer,
        scheduler,  # modify for using self trained model
        loss_fn,
        num_query,
        start_epoch,  # add for using self trained model
        0)
Example #17
0
import torch
from config import Config
from torch.backends import cudnn

from utils.logger import setup_logger
from datasets import make_dataloader
from model import make_model
from solver import make_optimizer, WarmupMultiStepLR
from loss import make_loss

from processor import do_train

# Training setup: build dataloaders, model, losses, optimizers and the
# warmup LR scheduler from the project config. (The training loop itself is
# not visible in this snippet.)
if __name__ == '__main__':

    Cfg = Config()
    logger = setup_logger('{}'.format(Cfg.PROJECT_NAME), Cfg.LOG_DIR)
    logger.info("Running with config:\n{}".format(Cfg.PROJECT_NAME))

    # NOTE(review): `os` is used here but not imported in this snippet —
    # presumably imported elsewhere in the original file; verify.
    os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID
    cudnn.benchmark = True
    # This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.

    train_loader, val_loader, num_query, num_classes = make_dataloader(Cfg)
    model = make_model(Cfg, num_class=num_classes)

    loss_func, center_criterion = make_loss(Cfg, num_classes=num_classes)

    # The center-loss criterion gets its own optimizer alongside the model's.
    optimizer, optimizer_center = make_optimizer(Cfg, model, center_criterion)
    scheduler = WarmupMultiStepLR(optimizer, Cfg.STEPS, Cfg.GAMMA,
                                  Cfg.WARMUP_FACTOR, Cfg.WARMUP_EPOCHS,
                                  Cfg.WARMUP_METHOD)
def main():
    """Reorder cached ReID distance matrices into a canonical order.

    Loads each configured distance-matrix checkpoint (cfg.TEST.DISTMAT1..18),
    permutes its rows/columns into a query/gallery order derived from the
    original image filenames, and writes the sorted matrix next to the input
    file as ``<path>_sorted.h5``.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument('--test_phase', action='store_true', help="use cpu")
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    test_dataloader, num_query, dataset = get_test_dataloader(cfg,
                                                              test_phase=True)

    # Canonical ordering: sort both sets by the second '_'-separated token
    # of the original filename.
    original_filenames = [
        img_path.split('/')[-1].split('_')[1]
        for img_path, _, _ in dataset.query
    ]
    query_idx = argsort(original_filenames)

    print('fixed query order', [dataset.query[i][0] for i in query_idx[:10]])

    original_filenames = [
        img_path.split('/')[-1].split('_')[1]
        for img_path, _, _ in dataset.gallery
    ]
    gallery_idx = argsort(original_filenames)
    print('fixed gallery order',
          [dataset.gallery[i][0] for i in gallery_idx[:10]])

    # Fix: collect cfg.TEST.DISTMAT1..DISTMAT18 with a loop instead of 18
    # hard-coded attribute reads.
    distmat_paths = [getattr(cfg.TEST, f'DISTMAT{i}') for i in range(1, 19)]

    cnt = 0
    for distmat_path in distmat_paths:
        if os.path.isfile(distmat_path):
            # Fix: context managers guarantee both HDF5 handles are closed
            # even if reading/writing raises.
            with h5py.File(distmat_path, 'r') as f:
                mat = f['dist_mat'][()]

            # The v5.1 baseline matrix is skipped (left as-is on disk).
            if not distmat_path.endswith('baseline_v5.1_distmat.h5'):
                mat = mat[query_idx]
                mat = mat[:, gallery_idx]

                with h5py.File('%s_sorted.h5' % distmat_path, 'w') as f2:
                    f2.create_dataset('dist_mat',
                                      data=mat,
                                      compression='gzip')

                cnt += 1
        else:
            logger.info(f'Invalid checkpoint path {distmat_path}')
    logger.info(f'Sort {cnt} results')
Example #19
0
def main():
    """Main function.

    Rescoring pipeline: samples latent codes from a generator, then for each
    semantic boundary measures how much moving codes along that boundary
    changes the corresponding attribute score from a predictor. Codes,
    boundaries and scores are saved under ``work_dir`` and a ranked summary
    of the most relevant semantics is logged.
    """
    args = parse_args()

    work_dir = args.output_dir or f'{args.model_name}_rescore'
    logger_name = f'{args.model_name}_rescore_logger'
    logger = setup_logger(work_dir, args.logfile_name, logger_name)

    logger.info(f'Initializing generator.')
    model = build_generator(args.model_name, logger=logger)

    logger.info(f'Preparing latent codes.')
    if args.num <= 0:
        raise ValueError(f'Argument `num` should be specified as a positive '
                         f'number, but `{args.num}` received!')
    # Sample z-space codes, then derive the other latent spaces without
    # generating styles or images (generate_* both False).
    latent_codes = model.easy_sample(num=args.num, latent_space_type='z')
    latent_codes = model.easy_synthesize(latent_codes=latent_codes,
                                         latent_space_type='z',
                                         generate_style=False,
                                         generate_image=False)
    for key, val in latent_codes.items():
        np.save(os.path.join(work_dir, f'{key}.npy'), val)

    logger.info(f'Initializing predictor.')
    predictor = build_predictor(args.predictor_name)

    boundaries = parse_boundary_list(args.boundary_list_path)

    logger.info(f'========================================')
    logger.info(f'Rescoring.')
    score_changing = []  # (boundary_name, mean positive score delta)
    for boundary_info, boundary_path in boundaries.items():
        logger.info(f'----------------------------------------')
        boundary_name, space_type = boundary_info
        logger.info(
            f'Boundary `{boundary_name}` from {space_type.upper()} space.')
        prefix = f'{boundary_name}_{space_type}'
        attr_idx = predictor.attribute_name_to_idx[boundary_name]

        # Boundary files may be either a pickled dict with a 'boundary' key
        # or a bare numpy array; try the dict form first.
        try:
            boundary_file = np.load(boundary_path, allow_pickle=True).item()
            boundary = boundary_file['boundary']
        except ValueError:
            boundary = np.load(boundary_path)

        np.save(os.path.join(work_dir, f'{prefix}_boundary.npy'), boundary)

        # z-space boundaries are applied globally; w/wp boundaries may be
        # applied per-layer (layerwise rescoring is opt-in via args).
        if space_type == 'z':
            layerwise_manipulation = False
            is_code_layerwise = False
            is_boundary_layerwise = False
            num_layers = 0
            strength = 1.0
        else:
            layerwise_manipulation = True
            is_code_layerwise = True
            is_boundary_layerwise = (space_type == 'wp')
            num_layers = model.num_layers if args.layerwise_rescoring else 0
            if space_type == 'w':
                strength = get_layerwise_manipulation_strength(
                    model.num_layers, model.truncation_psi,
                    model.truncation_layers)
            else:
                strength = 1.0
            # Both 'w' and 'wp' boundaries are evaluated in wp space.
            space_type = 'wp'

        # codes[:, 0] = unmodified code, codes[:, 1] = all-layer
        # manipulation (l == -1), codes[:, 2:] = single-layer manipulations.
        codes = []
        codes.append(latent_codes[space_type][:, np.newaxis])
        for l in range(-1, num_layers):
            codes.append(
                manipulate(latent_codes[space_type],
                           boundary,
                           start_distance=2.0,
                           end_distance=2.0,
                           step=1,
                           layerwise_manipulation=layerwise_manipulation,
                           num_layers=model.num_layers,
                           manipulate_layers=None if l < 0 else l,
                           is_code_layerwise=is_code_layerwise,
                           is_boundary_layerwise=is_boundary_layerwise,
                           layerwise_manipulation_strength=strength))
        codes = np.concatenate(codes, axis=1)

        # Predict the target attribute for every (sample, manipulation) pair.
        scores = []
        for i in tqdm(range(args.num), leave=False):
            images = model.easy_synthesize(latent_codes=codes[i],
                                           latent_space_type=space_type,
                                           generate_style=False,
                                           generate_image=True)['image']
            scores.append(
                predictor.easy_predict(images)['attribute'][:, attr_idx])
        scores = np.stack(scores, axis=0)
        np.save(os.path.join(work_dir, f'{prefix}_scores.npy'), scores)

        # Mean positive score increase measures the boundary's relevance;
        # negative deltas are clipped to zero.
        delta = scores[:, 1] - scores[:, 0]
        delta[delta < 0] = 0
        score_changing.append((boundary_name, np.mean(delta)))
        if num_layers:
            layerwise_score_changing = []
            for l in range(num_layers):
                delta = scores[:, l + 2] - scores[:, 0]
                delta[delta < 0] = 0
                layerwise_score_changing.append(
                    (f'Layer {l:02d}', np.mean(delta)))
            layerwise_score_changing.sort(key=lambda x: x[1], reverse=True)
            for layer_name, delta_score in layerwise_score_changing:
                logger.info(f'  {layer_name}: {delta_score:7.4f}')
    logger.info(f'----------------------------------------')
    logger.info(f'Most relevant semantics:')
    score_changing.sort(key=lambda x: x[1], reverse=True)
    for boundary_name, delta_score in score_changing:
        logger.info(f'  {boundary_name.ljust(15)}: {delta_score:7.4f}')
def main():
    """Visualise query->gallery heatmaps for a single query image.

    Picks the query image selected by ``--id``, compares it against every
    gallery batch, and saves side-by-side query/gallery heatmap figures to
    ``heatmaps/<id>/`` with the gallery distance, index, camera id and a
    same-identity flag encoded in the filename.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("--id", required=True, type=int)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR

    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    query_loader: DataLoader
    gallery_loader: DataLoader
    query_loader, gallery_loader, num_classes = make_data_loader(
        cfg, get_demo_dataset=True)
    query_data = query_loader.dataset[args.id]

    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    if not os.path.exists('heatmaps/{}'.format(args.id)):
        os.makedirs('heatmaps/{}'.format(args.id))

    device = cfg.MODEL.DEVICE

    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    # Fix: the model was previously only moved to the device on multi-GPU
    # runs, while the inputs below are moved whenever >= 1 GPU is present,
    # which made single-GPU inference crash on a device mismatch.
    if torch.cuda.device_count() >= 1:
        model.to(device)

    model.eval()
    with torch.no_grad():
        data, pid, camid, path = query_data
        query = data.to(device) if torch.cuda.device_count() >= 1 else data

    # Fix: `model.module` only exists when the model is wrapped in
    # DataParallel; unwrap conditionally instead of unconditionally.
    base_model = model.module if isinstance(model, nn.DataParallel) else model

    for batch in gallery_loader:
        data, pids, camids, paths = batch
        B = data.shape[0]
        gallerys = data.to(device) if torch.cuda.device_count() >= 1 else data

        # Heatmaps come back stacked: first B entries are query-side maps,
        # next B entries are gallery-side maps.
        heatmaps, distances = get_heatmap(base_model, query, gallerys,
                                          (224, 224))

        query_img = denormalize(query, cfg)
        for i in range(B):
            query_heatmap = cv2.applyColorMap(heatmaps[i], cv2.COLORMAP_JET)
            query_heatmap = (query_heatmap * 0.3 + query_img * 0.5).astype(
                np.uint8)

            gallery_heatmap = cv2.applyColorMap(heatmaps[B + i],
                                                cv2.COLORMAP_JET)

            gallery_img = denormalize(gallerys[i], cfg)
            gallery_heatmap = (gallery_heatmap * 0.3 +
                               gallery_img * 0.5).astype(np.uint8)

            heatmap = np.concatenate((query_heatmap, gallery_heatmap), axis=1)

            plt.imshow(heatmap)
            plt.savefig('heatmaps/{}/{}_{}_{}_{}.png'.format(
                args.id, distances[i].item(), i, camids[i], pids[i] == pid))
            # Fix: close the figure so memory does not grow with the number
            # of saved heatmaps.
            plt.close()
Example #21
0
def main():
    """Evaluate a trained DSSD text detector on a detection benchmark.

    Parses CLI options, builds the dataset/loader for the chosen benchmark
    (ic15/ic13/td500/coco), restores the network weights, runs inference on
    every image and writes per-image result files in the benchmark's
    expected format under ``<out>/<checkpoint-step>/res/``.
    """
    parser = argparse.ArgumentParser(
        description='Single Shot MultiBox Detector Testing')
    parser.add_argument('--resume',
                        dest='resume',
                        help='initialize with pretrained model weights',
                        default='./weights/ic15_90_15.pth',
                        type=str)
    parser.add_argument('--version',
                        dest='version',
                        help='512x512, 768x768, 768x1280, 1280x1280',
                        default='768x1280',
                        type=str)
    parser.add_argument('--dataset',
                        dest='dataset',
                        help='ic15, ic13, td500, coco',
                        default='ic15',
                        type=str)
    parser.add_argument('--works',
                        dest='num_workers',
                        help='num_workers to load data',
                        default=1,
                        type=int)
    parser.add_argument('--test_batch_size',
                        dest='test_batch_size',
                        help='train_batch_size',
                        default=1,
                        type=int)
    parser.add_argument('--out',
                        dest='out',
                        help='output file dir',
                        default='./outputs_eval/ic15/',
                        type=str)
    parser.add_argument('--log_file_dir',
                        dest='log_file_dir',
                        help='log_file_dir',
                        default='./logs/',
                        type=str)
    parser.add_argument('--ssd_dim', default=512, type=int, help='ssd dim')

    #parser.add_argument('--root', default='../../DataSets/text_detect/',type=str,  help='Location of data root directory')
    parser.add_argument('--ic_root',
                        default='../data/ocr/detection/',
                        type=str,
                        help='Location of data root directory')
    # parser.add_argument('--ic_root', default='/home/lvpengyuan/research/text/',type=str,  help='Location of data root directory')
    parser.add_argument('--td_root',
                        default='/home/lpy/Datasets/TD&&TR/',
                        type=str,
                        help='Location of data root directory')
    parser.add_argument('--coco_root',
                        default='/home/lpy/Datasets/coco-text/',
                        type=str,
                        help='Location of data root direction')
    args = parser.parse_args()
    cuda = torch.cuda.is_available()
    ## setup logger
    # Fix: makedirs(exist_ok=True) replaces the `== False` existence check.
    os.makedirs(args.log_file_dir, exist_ok=True)
    log_file_path = args.log_file_dir + 'eval_' + time.strftime(
        '%Y%m%d_%H%M%S') + '.log'
    setup_logger(log_file_path)

    # Pick the network config matching the requested input resolution.
    if args.version == '512x512':
        cfg = cfg_512x512
    elif args.version == '768x768':
        cfg = cfg_768x768
    elif args.version == '1280x1280':
        cfg = cfg_1280x1280
    elif args.version == '768x1280':
        cfg = cfg_768x1280
    else:
        exit()

    ssd_dim = args.ssd_dim
    means = (104, 117, 123)

    if args.dataset == 'ic15':
        dataset = ICDARDetection(args.ic_root,
                                 'val',
                                 None,
                                 None,
                                 '15',
                                 dim=cfg['min_dim'])
        data_loader = data.DataLoader(dataset,
                                      args.test_batch_size,
                                      num_workers=args.num_workers,
                                      shuffle=False,
                                      pin_memory=True)
    elif args.dataset == 'ic13':
        dataset = ICDARDetection(args.ic_root,
                                 'val',
                                 None,
                                 None,
                                 '13',
                                 dim=cfg['min_dim'])
        data_loader = data.DataLoader(dataset,
                                      args.test_batch_size,
                                      num_workers=args.num_workers,
                                      shuffle=False,
                                      pin_memory=True)
    elif args.dataset == 'td500':
        dataset = TD500Detection(args.td_root,
                                 'val',
                                 None,
                                 None,
                                 aug=False,
                                 dim=cfg['min_dim'])
        data_loader = data.DataLoader(dataset,
                                      args.test_batch_size,
                                      num_workers=args.num_workers,
                                      shuffle=False,
                                      pin_memory=True)
    elif args.dataset == 'coco':
        dataset = COCODetection(args.coco_root, 'test', dim=cfg['min_dim'])
        data_loader = data.DataLoader(dataset,
                                      args.test_batch_size,
                                      num_workers=args.num_workers,
                                      shuffle=False,
                                      pin_memory=True)
    else:
        exit()

    logging.info('dataset initialize done.')

    ## setup mode

    net = build_dssd('test', cfg, ssd_dim, 2)

    logging.info('loading {}...'.format(args.resume))
    net.load_weights(args.resume)
    rpsroi_pool = RPSRoIPool(2, 2, 1, 2, 1)
    if cuda:
        net = net.cuda()
        rpsroi_pool = rpsroi_pool.cuda()
    net.eval()
    rpsroi_pool.eval()

    # Output layout: <out>/<checkpoint-step>/{seg,box,res}/
    save_dir = args.out + '/' + args.resume.strip().split('_')[-1].split(
        '.')[0] + '/'
    seg_dir = save_dir + 'seg/'
    box_dir = save_dir + 'box/'
    res_dir = save_dir + 'res/'
    # Fix: previously box_dir and res_dir were only created when seg_dir was
    # missing, so evaluation crashed if seg/ existed but box/ or res/ did
    # not. Create each directory independently.
    for d in (args.out, save_dir, seg_dir, box_dir, res_dir):
        os.makedirs(d, exist_ok=True)

    logging.info('eval begin')
    for i, sample in enumerate(data_loader, 0):
        img, image_name, ori_h, ori_w = sample
        # print(image_name)
        if i % 100 == 0:
            print(i, len(data_loader))
        h, w = img.size(2), img.size(3)
        if cuda:
            img = img.cuda()
        img = Variable(img)
        out, seg_pred, seg_map = net(img)
        save_name = image_name[0].split('/')[-1].split('.')[0]
        candidate_box = eval_img(out,
                                 seg_pred,
                                 seg_map,
                                 rpsroi_pool,
                                 img,
                                 save_name,
                                 seg_dir,
                                 box_dir,
                                 vis=True)

        # format output
        if args.dataset == 'coco':
            # COCO ids are the numeric tail of the filename.
            save_name = save_name.strip().split('_')[-1]
            save_name = str(int(save_name))
        res_name = res_dir + '/' + 'res_' + save_name + '.txt'
        # Fix: `with open(...)` guarantees the result file is closed even if
        # formatting a box raises.
        with open(res_name, 'w') as fp:
            for box in candidate_box:
                temp_x = []
                temp_y = []
                temp = []
                # Coordinates alternate x,y; rescale from the network input
                # size back to the original image size. box[-1] is an extra
                # field (presumably the confidence score — written only for
                # coco below).
                for j in range(len(box) - 1):
                    if j % 2 == 0:
                        temp_x.append(int(box[j] * ori_w[0] / w))
                        temp.append(str(int(box[j] * ori_w[0] / w)))
                    else:
                        temp_y.append(int(box[j] * ori_h[0] / h))
                        temp.append(str(int(box[j] * ori_h[0] / h)))
                if args.dataset == 'ic13':
                    # ic13 expects an axis-aligned box: xmin,ymin,xmax,ymax
                    fp.write(','.join([
                        str(min(temp_x)),
                        str(min(temp_y)),
                        str(max(temp_x)),
                        str(max(temp_y))
                    ]) + '\n')
                elif args.dataset == 'coco':
                    fp.write(','.join([
                        str(min(temp_x)),
                        str(min(temp_y)),
                        str(max(temp_x)),
                        str(max(temp_y)),
                        str(box[-1])
                    ]) + '\n')
                else:
                    # Polygon formats (ic15, td500) keep all vertices.
                    fp.write(','.join(temp) + '\n')

    logging.info('evaluate done')
Example #22
0
def main(args):
    """Export a trained SimCLR classifier checkpoint to ONNX and validate it.

    Loads the YAML config, locates ``checkpoint_<epoch>.pth`` in the given
    model directory, measures the torch model's val/test accuracy, exports
    it to ONNX and measures the exported model's accuracy for comparison.

    Raises:
        FileNotFoundError: if the config file, model directory or checkpoint
            file does not exist.
    """
    # Fix: validate the config path *before* opening it — the original
    # called open() first, so a missing file raised a raw OSError and the
    # intended FileNotFoundError below was dead code. Also use `with` so the
    # file handle is closed.
    if not os.path.exists(args.config):
        raise FileNotFoundError('provided config file does not exist: %s' %
                                args.config)
    with open(args.config, 'r') as config_file:
        config_yaml = yaml.load(config_file, Loader=yaml.FullLoader)

    config_yaml['logger_name'] = 'onnx'
    config = SimCLRConfig(config_yaml)

    if not os.path.exists(config.base.output_dir_path):
        os.mkdir(config.base.output_dir_path)

    if not os.path.exists(config.base.log_dir_path):
        os.makedirs(config.base.log_dir_path)

    logger = setup_logger(config.base.logger_name, config.base.log_file_path)
    logger.info('using config: %s' % config)

    if not os.path.exists(args.model):
        raise FileNotFoundError('provided model directory does not exist: %s' %
                                args.model)
    logger.info('using model directory: %s' % args.model)

    config.onnx.model_path = args.model
    logger.info('using model_path: {}'.format(config.onnx.model_path))

    config.onnx.epoch_num = args.epoch_num
    logger.info('using epoch_num: {}'.format(config.onnx.epoch_num))

    # Checkpoint naming convention: checkpoint_<epoch>.pth inside model_path.
    model_file_path = Path(
        config.onnx.model_path).joinpath('checkpoint_' +
                                         config.onnx.epoch_num + '.pth')
    if not os.path.exists(model_file_path):
        raise FileNotFoundError('model file does not exist: %s' %
                                model_file_path)
    logger.info('using model file: %s' % model_file_path)

    train_dataset, val_dataset, test_dataset, classes = Datasets.get_datasets(
        config)
    num_classes = len(classes)

    train_loader, val_loader, test_loader = Datasets.get_loaders(
        config, train_dataset, val_dataset, test_dataset)

    torch_model = load_torch_model(config, num_classes)

    # Baseline accuracy of the torch model before export.
    val_acc, test_acc = test_pt_model(config, torch_model, val_dataset,
                                      test_dataset, val_loader, test_loader)
    logger.info('torch model performance -> val_acc: {}, test_acc: {}'.format(
        val_acc, test_acc))

    # Move to CPU before export.
    torch_model = torch_model.to(torch.device('cpu'))
    onnx_model_file_path = save_onnx_model(torch_model,
                                           num_classes=num_classes,
                                           config=config,
                                           current_epoch=config.onnx.epoch_num)

    onnx_model = load_onnx_model(config, onnx_model_file_path)
    if onnx_model:
        logger.info('loaded onnx_model: {}'.format(onnx_model_file_path))

    # Compare the exported model's accuracy against the torch baseline.
    val_acc, test_acc = test_onnx_model(config, onnx_model_file_path,
                                        val_dataset, test_dataset, val_loader,
                                        test_loader)
    logger.info('onnx model performance -> val_acc: {}, test_acc: {}'.format(
        val_acc, test_acc))
Example #23
0
def main():
    """Train AnyNet end-to-end on a stereo dataset, then run a final test pass.

    Configuration is read from the module-level ``args`` namespace; a rolling
    checkpoint and ``training.log`` are written under ``args.save_path``.
    Requires CUDA (the model is wrapped in ``nn.DataParallel(...).cuda()``).
    """
    global args

    # File lists for left/right image pairs and ground-truth disparity maps.
    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)

    # True/False flag toggles training-time behavior inside the dataset
    # (presumably augmentation/cropping — TODO confirm in DA.myImageFloder).
    TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        train_left_img, train_right_img, train_left_disp, True),
                                                 batch_size=args.train_bsize,
                                                 shuffle=True,
                                                 num_workers=4,
                                                 drop_last=False)

    TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        test_left_img, test_right_img, test_left_disp, False),
                                                batch_size=args.test_bsize,
                                                shuffle=False,
                                                num_workers=4,
                                                drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + '/training.log')
    # Record every option so runs are reproducible from the log alone.
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ': ' + str(value))

    model = models.anynet.AnyNet(args)
    model = nn.DataParallel(model).cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    log.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    args.start_epoch = 0
    if args.resume:
        # Restore model + optimizer state and continue from the saved epoch.
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> Will start from scratch.")
    else:
        log.info('Not Resume')

    start_full_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        log.info('This is {}-th epoch'.format(epoch))

        train(TrainImgLoader, model, optimizer, log, epoch)

        # A single rolling checkpoint file, overwritten every epoch.
        savefilename = args.save_path + '/checkpoint.tar'
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, savefilename)

    test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
        # print info
        lr = optimizer.param_groups[0]['lr']
        if idx % args.print_freq == 0:
            logger.info(f'Train: [{epoch}][{idx}/{len(train_loader)}] lr: {lr:.5f}\t'
                        f'T {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                        f'loss {loss_meter.val:.3f} ({loss_meter.avg:.3f})\t'
                        f'prob {prob_meter.val:.3f} ({prob_meter.avg:.3f})\t'
                        f'CLD loss {train_CLD_loss.val:.3f} ({train_CLD_loss.avg:.3f})\t'
                        f'Top-1 acc {train_CLD_acc.val:.3f} ({train_CLD_acc.avg:.3f})')

    return loss_meter.avg, prob_meter.avg


# Distributed launcher: one process per GPU, rendezvous via environment
# variables (torch.distributed launch, env:// init method).
if __name__ == '__main__':
    opt = parse_option()

    # Bind this process to its local device before joining the NCCL group.
    torch.cuda.set_device(opt.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    cudnn.benchmark = True

    os.makedirs(opt.save_dir, exist_ok=True)
    logger = setup_logger(output=opt.save_dir, distributed_rank=dist.get_rank(), name="moco+cld")
    # Only rank 0 writes config.json, avoiding concurrent writes to one file.
    if dist.get_rank() == 0:
        path = os.path.join(opt.save_dir, "config.json")
        with open(path, 'w') as f:
            json.dump(vars(opt), f, indent=2)
        logger.info("Full config saved to {}".format(path))

    main(opt)
Example #25
0
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, if_train=True)
    logger.info("Saving model in the path :{}".format(cfg.OUTPUT_DIR))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    train_loader, val_loader, num_query, num_classes = make_dataloader(cfg)

    if cfg.MODEL.PRETRAIN_CHOICE == 'finetune':
        model = make_model(cfg, num_class=num_classes)
Example #26
0
 def _set_logging(self):
     """Create the log directory tree and route logging to ``info.log``."""
     self.paths = Paths.make_dirs(self.config.util.logdir)
     log_file = self.paths.logdir / 'info.log'
     setup_logger(str(log_file))
Example #27
0
    model = Hybrid_Net(args)
    args.loss_weights = [0.5, 0.7, 1., 1., 1.]

elif args.model_types == "Hybrid_Net_DSM":
    model = Hybrid_Net(args)
    args.loss_weights = [0.5, 0.7, 1., 1., 1.]

else:

    AssertionError("model error")

if args.cuda:
    model = nn.DataParallel(model)
    model.cuda()

log = logger.setup_logger(args.save_path + '/training.log')
for key, value in sorted(vars(args).items()):
    log.info(str(key) + ': ' + str(value))

if args.pretrained:
    if os.path.isfile(args.pretrained):
        checkpoint = torch.load(args.pretrained)
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        log.info('=> loaded pretrained model {}'.format(args.pretrained))
    else:
        log.info('=> no pretrained model found at {}'.format(args.pretrained))
        log.info("=> Will start from scratch.")

else:
    log.info('Not Resume')
Example #28
0
def main():
    """Run ReID baseline inference on the SYSU IV-ReID benchmark.

    Loads a trained model from ``cfg.TEST.WEIGHT``, extracts features for the
    query set once and for 10 random gallery trials, then reports per-trial
    and averaged CMC / mAP. Configuration is merged from an optional YAML
    file plus command-line overrides. Requires CUDA.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # WORLD_SIZE is set by torch.distributed launchers; default to 1 GPU.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    if args.config_file != "": cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir): mkdir(output_dir)
    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        # Echo the full config file into the log for reproducibility.
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))
    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    def extract_feat_pn(loader):
        """Extract normalized positive/negative feature pairs for a loader.

        Returns two (num_samples, feature_dim) numpy arrays filled in batch
        order. NOTE(review): defined but not called below — kept for parity
        with extract_feat.
        """
        ptr = 0
        gall_feat_pos = np.zeros((len(loader.dataset), feature_dim))
        gall_feat_neg = np.zeros((len(loader.dataset), feature_dim))
        for batch_idx, (input, label) in enumerate(loader):
            batch_num = input.size(0)
            model.eval()
            with torch.no_grad():
                input = input.to('cuda')
                feat_pos, feat_neg = model(input)
                feat_pos = norm(feat_pos)
                feat_neg = norm(feat_neg)
                gall_feat_pos[ptr:ptr +
                              batch_num, :] = feat_pos.detach().cpu().numpy()
                gall_feat_neg[ptr:ptr +
                              batch_num, :] = feat_neg.detach().cpu().numpy()
                ptr = ptr + batch_num
        return gall_feat_pos, gall_feat_neg

    feature_dim = 2048

    def extract_feat(loader):
        """Extract one normalized feature per sample into a numpy array.

        Uses only the first (positive) head of the model's two outputs; the
        commented alternatives below are earlier fusion experiments.
        """
        ptr = 0
        gall_feat = np.zeros((len(loader.dataset), feature_dim))
        for batch_idx, (input, label) in enumerate(loader):
            batch_num = input.size(0)
            model.eval()
            with torch.no_grad():
                input = input.to('cuda')

                # feat_pos = model(input)
                feat_pos, feat_neg = model(input)

                # feat = torch.cat((feat_pos, feat_neg), dim=1)
                # feat = (feat_neg + feat_pos)/2
                # feat = torch.cat((norm(feat_pos), norm(feat_neg)), dim=1)

                feat = norm(feat_pos)

                gall_feat[ptr:ptr + batch_num, :] = feat.detach().cpu().numpy()
                ptr = ptr + batch_num
        return gall_feat

    # NOTE(review): these bare string literals are visual section separators
    # only; they have no runtime effect.
    '==============================================================================================='
    # 395 is presumably the SYSU identity count used at training time —
    # TODO confirm against the training script.
    model = build_model(cfg, 395)
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT))
    model.to('cuda')
    '==============================================================================================='
    data_path = '/data1/lidg/reid_dataset/IV-ReID/SYSU'
    query_img, query_label, query_cam = process_query_sysu(data_path,
                                                           mode='all')
    gall_img, gall_label, gall_cam = process_gallery_sysu(data_path,
                                                          mode='all',
                                                          trial=0)

    nquery = len(query_label)
    ngall = len(gall_label)
    print("Dataset statistics:")
    print("  ------------------------------")
    print("  subset   | # ids | # images")
    print("  ------------------------------")
    print("  query    | {:5d} | {:8d}".format(len(np.unique(query_label)),
                                              nquery))
    print("  gallery  | {:5d} | {:8d}".format(len(np.unique(gall_label)),
                                              ngall))
    print("  ------------------------------")

    # ImageNet normalization statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    hight, width = 256, 128  #   (384, 256)   (256, 128)  (224,224)
    transform_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((hight, width)),
        transforms.ToTensor(),
        normalize,
    ])

    '==============================================================================================='
    # Query features are extracted once and reused across all gallery trials.
    queryset = TestData(query_img, query_label, transform=transform_test)
    query_loader = data.DataLoader(queryset,
                                   batch_size=29,
                                   shuffle=False,
                                   num_workers=4)
    print('Extracting Query Feature...')
    query_feat = extract_feat(query_loader)
    '==============================================================================================='

    # Accumulate CMC/mAP over 10 random gallery samplings (standard SYSU
    # evaluation protocol).
    all_cmc = 0
    all_mAP = 0
    acc = np.zeros((10, 4))
    for trial in range(10):
        gall_img, gall_label, gall_cam = process_gallery_sysu(data_path,
                                                              mode='all',
                                                              trial=trial)

        '==============================================================================================='
        trial_gallset = TestData(gall_img,
                                 gall_label,
                                 transform=transform_test)
        trial_gall_loader = data.DataLoader(trial_gallset,
                                            batch_size=29,
                                            shuffle=False,
                                            num_workers=4)
        print('Extracting Gallery Feature...')
        gall_feat = extract_feat(trial_gall_loader)
        '==============================================================================================='

        # Cosine similarity (features are normalized); negated so that
        # eval_sysu can treat it as a distance matrix.
        distmat = np.matmul(query_feat, np.transpose(gall_feat))

        cmc, mAP = eval_sysu(-distmat, query_label, gall_label, query_cam,
                             gall_cam)

        if trial == 0:
            all_cmc = cmc
            all_mAP = mAP
        else:
            all_cmc = all_cmc + cmc
            all_mAP = all_mAP + mAP
        print('Test Trial: {}'.format(trial))
        print(
            'FC: top-1: {:.2%} | top-5: {:.2%} | top-10: {:.2%}| top-20: {:.2%}'
            .format(cmc[0], cmc[4], cmc[9], cmc[19]))
        print('mAP: {:.2%}'.format(mAP))
        acc[trial][0] = float('%.4f' % cmc[0])
        acc[trial][1] = float('%.4f' % cmc[9])
        acc[trial][2] = float('%.4f' % cmc[19])
        acc[trial][3] = float('%.4f' % mAP)
    # Average over the 10 trials, then report mean and std of the logged
    # (top-1, top-10, top-20, mAP) columns.
    cmc = all_cmc / 10
    mAP = all_mAP / 10
    print('All Average:')
    print('FC: top-1: {:.2%} | top-5: {:.2%} | top-10: {:.2%}| top-20: {:.2%}'.
          format(cmc[0], cmc[4], cmc[9], cmc[19]))
    print('mAP: {:.2%}'.format(mAP))
    print(np.mean(acc, 0))
    print(np.std(acc, 0))
Example #29
0
import sys
import datetime
from dateutil.parser import parse

from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QWidget, QApplication
from UI.Ui_SoftKeyBoard import Ui_SoftKeyBoard

if __name__ == '__main__':
    from utils import logger

    # Script mode: log to a local file. When imported as a module, the host
    # application is expected to install its own logger (log stays None).
    global log
    log = logger.setup_logger('logging.log')
else:
    log = None

# Mapping from a digit key to the character it produces while Shift is held
# (US keyboard layout).
shift_map_num = {
    '1': '!',
    '2': '@',
    '3': '#',
    '4': '$',
    '5': '%',
    '6': '^',
    '7': '&',
    '8': '*',
    '9': '(',
    '0': ')',
}

shift_map_punctuation = {
from config.cfg import Cfg
from torch.backends import cudnn

from utils.logger import setup_logger
from datasets import make_dataloader
from model import make_model
from solver import make_optimizer, WarmupMultiStepLR
from loss import make_loss

from processor import do_train

if __name__ == '__main__':

    Cfg.freeze()
    log_dir = Cfg.DATALOADER.LOG_DIR
    logger = setup_logger('pose-transfer-gan.train', log_dir)
    logger.info("Running with config:\n{}".format(Cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True
    # This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.

    train_loader, val_loader = make_dataloader(Cfg)
    model_G, model_Dip, model_Dii, model_D_reid = make_model(Cfg)

    optimizerG = make_optimizer(Cfg, model_G)
    optimizerDip = make_optimizer(Cfg, model_Dip)
    optimizerDii = make_optimizer(Cfg, model_Dii)

    schedulerG = WarmupMultiStepLR(optimizerG, Cfg.SOLVER.STEPS,
                                   Cfg.SOLVER.GAMMA, Cfg.SOLVER.WARMUP_FACTOR,
Example #31
0
    def train(self, cfg):
        """Train a CenterNet-style detector according to ``cfg``.

        Sets up GPUs, logging, the YOLO-format dataset and model, optionally
        resumes from ``model_last.pth``, then runs the epoch loop with
        periodic visual validation on sampled images, checkpointing, and
        stepped learning-rate decay. Checkpoints and debug renders are
        written under ``cfg.save_dir/cfg.id``.
        """
        # Configure the GPU environment, covering both single- and multi-GPU
        # setups: normalize cfg.gpus to a list and export the visible devices.
        gpus_str = ''
        if isinstance(cfg.gpus, (list, tuple)):
            cfg.gpus = [int(i) for i in cfg.gpus]
            for s in cfg.gpus:
                gpus_str += str(s) + ','
            gpus_str = gpus_str[:-1]
        else:
            gpus_str = str(int(cfg.gpus))
            cfg.gpus = [int(cfg.gpus)]
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus_str
        # After masking, devices are renumbered from 0; [-1] means CPU.
        cfg.gpus = [i for i in range(len(cfg.gpus))
                    ] if cfg.gpus[0] >= 0 else [-1]

        # Set up logging and output directories.
        model_dir = os.path.join(cfg.save_dir, cfg.id)
        debug_dir = os.path.join(model_dir, 'debug')
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        if not os.path.exists(debug_dir):
            os.makedirs(debug_dir)
        logger = setup_logger(cfg.id, os.path.join(model_dir, 'log'))
        if USE_TENSORBOARD:
            writer = tensorboardX.SummaryWriter(
                log_dir=os.path.join(model_dir, 'log'))
        logger.info(cfg)

        gpus = cfg.gpus
        device = torch.device('cpu' if gpus[0] < 0 else 'cuda')
        lr = cfg.lr
        lr_step = cfg.lr_step
        num_epochs = cfg.num_epochs
        val_step = cfg.val_step
        sample_size = cfg.sample_size

        # Build the training dataset (YOLO-format annotations + augmentation
        # flags from cfg).
        dataset = YOLO(cfg.data_dir,
                       cfg.hflip,
                       cfg.vflip,
                       cfg.rotation,
                       cfg.scale,
                       cfg.shear,
                       opt=cfg,
                       split='train')
        names = dataset.class_name
        std = dataset.std
        mean = dataset.mean
        # Size the prediction heads from the dataset's class count.
        cfg.setup_head(dataset)
        trainloader = DataLoader(dataset,
                                 batch_size=cfg.batch_size,
                                 shuffle=True,
                                 num_workers=cfg.num_workers,
                                 pin_memory=True,
                                 drop_last=True)

        # val_dataset = YOLO(cfg.data_dir, cfg.hflip, cfg.vflip, cfg.rotation, cfg.scale, cfg.shear, opt=cfg, split='val')
        # valloader = DataLoader(val_dataset, batch_size=1, shuffle=True, num_workers=1, pin_memory=True)
        # Validation is a plain list of image paths, not a DataLoader.
        valid_file = cfg.val_dir if not cfg.val_dir == '' else os.path.join(
            cfg.data_dir, 'valid.txt')
        with open(valid_file, 'r') as f:
            val_list = [l.rstrip() for l in f.readlines()]

        net = create_model(cfg.arch, cfg.heads, cfg.head_conv, cfg.down_ratio,
                           cfg.filters)
        optimizer = optim.Adam(net.parameters(), lr=lr)
        start_epoch = 0

        if cfg.resume:
            # Best-effort resume: a checkpoint from an incompatible
            # architecture is skipped rather than aborting the run.
            pretrain = os.path.join(model_dir, 'model_last.pth')
            if os.path.exists(pretrain):
                print('resume model from %s' % pretrain)
                try:
                    net, optimizer, start_epoch = load_model(
                        net, pretrain, optimizer, True, lr, lr_step)
                except:
                    print('\t... loading model error: ckpt may not compatible')
        model = ModleWithLoss(net, CtdetLoss(cfg))
        if len(gpus) > 1:
            model = nn.DataParallel(model, device_ids=gpus).to(device)
        else:
            model = model.to(device)

        step = 0
        best = 1e10
        # Which loss components to track; offset/objectness terms are
        # optional depending on cfg.
        log_loss_stats = ['loss', 'hm_loss', 'wh_loss']
        if cfg.reg_offset:
            log_loss_stats += ['off_loss']
        if cfg.reg_obj:
            log_loss_stats += ['obj_loss']
        for epoch in range(start_epoch + 1, num_epochs + 1):
            avg_loss_stats = {l: AverageMeter() for l in log_loss_stats}
            model.train()
            with tqdm(trainloader) as loader:
                for _, batch in enumerate(loader):
                    for k in batch:
                        if k != 'meta':
                            batch[k] = batch[k].to(device=device,
                                                   non_blocking=True)
                    output, loss, loss_stats = model(batch)
                    # .mean() reduces across DataParallel replicas.
                    loss = loss.mean()
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    # Update the tqdm progress-bar display.
                    lr = optimizer.param_groups[0]['lr']
                    poststr = ''
                    for l in avg_loss_stats:
                        avg_loss_stats[l].update(loss_stats[l].mean().item(),
                                                 batch['input'].size(0))
                        poststr += '{}: {:.4f}; '.format(
                            l, avg_loss_stats[l].avg)
                    loader.set_description('Epoch %d' % (epoch))
                    poststr += 'lr: {:.4f}'.format(lr)
                    loader.set_postfix_str(poststr)

                    step += 1
                    # self.lossSignal.emit(loss.item(), step)
                    del output, loss, loss_stats

                    # Periodic visual validation every val_step steps.
                    if step % val_step == 0:
                        if len(cfg.gpus) > 1:
                            val_model = model.module
                        else:
                            val_model = model
                        val_model.eval()
                        torch.cuda.empty_cache()

                        # Randomly sample validation images.
                        idx = np.arange(len(val_list))
                        idx = np.random.permutation(idx)[:sample_size]

                        for j, id in enumerate(idx):
                            image = cv2.imread(val_list[id])
                            image = self.preprocess(image, cfg.input_h,
                                                    cfg.input_w, mean, std)
                            image = image.to(device)

                            with torch.no_grad():
                                output = val_model.model(image)[-1]

                            # Draw detections and save debug images.
                            debugger = Debugger(dataset=names,
                                                down_ratio=cfg.down_ratio)
                            reg = output['reg'] if cfg.reg_offset else None
                            obj = output['obj'] if cfg.reg_obj else None
                            dets = ctdet_decode(output['hm'].sigmoid_(),
                                                output['wh'],
                                                reg=reg,
                                                obj=obj,
                                                cat_spec_wh=cfg.cat_spec_wh,
                                                K=cfg.K)
                            dets = dets.detach().cpu().numpy().reshape(
                                -1, dets.shape[2])
                            # Boxes are predicted on the downsampled heatmap
                            # grid; scale back to input resolution.
                            dets[:, :4] *= cfg.down_ratio
                            image = image[0].detach().cpu().numpy().transpose(
                                1, 2, 0)
                            image = np.clip(((image * std + mean) * 255.), 0,
                                            255).astype(np.uint8)
                            pred = debugger.gen_colormap(
                                output['hm'][0].detach().cpu().numpy())
                            debugger.add_blend_img(image, pred, 'pred_hm')
                            debugger.add_img(image, img_id='out_pred')
                            for k in range(len(dets)):
                                if dets[k, 4] > cfg.vis_thresh:
                                    debugger.add_coco_bbox(dets[k, :4],
                                                           dets[k, -1],
                                                           dets[k, 4],
                                                           img_id='out_pred')

                            debugger.save_all_imgs(debug_dir,
                                                   prefix='{}.{}_'.format(
                                                       step, j))
                            del output, image, dets
                        # Save model weights. NOTE(review): with the loss-based
                        # selection commented out below, 'model_best.pth' is
                        # overwritten on every validation, not only on improvement.
                        save_model(os.path.join(model_dir, 'model_best.pth'),
                                   epoch, net)
                        model.train()

            logstr = 'epoch {}'.format(epoch)
            for k, v in avg_loss_stats.items():
                logstr += ' {}: {:.4f};'.format(k, v.avg)
                if USE_TENSORBOARD:
                    writer.add_scalar('train_{}'.format(k), v.avg, epoch)
            logger.info(logstr)

            # if epoch % val_step == 0:
            #     if len(cfg.gpus) > 1:
            #         val_model = model.module
            #     else:
            #         val_model = model
            #     val_model.eval()
            #     torch.cuda.empty_cache()
            #
            #     val_loss_stats = {l: AverageMeter() for l in log_loss_stats}
            #
            #     with tqdm(valloader) as loader:
            #         for j, batch in enumerate(loader):
            #             for k in batch:
            #                 if k != 'meta':
            #                     batch[k] = batch[k].to(device=device, non_blocking=True)
            #             with torch.no_grad():
            #                 output, loss, loss_stats = val_model(batch)
            #
            #             poststr = ''
            #             for l in val_loss_stats:
            #                 val_loss_stats[l].update(
            #                     loss_stats[l].mean().item(), batch['input'].size(0))
            #                 poststr += '{}: {:.4f}; '.format(l, val_loss_stats[l].avg)
            #             loader.set_description('Epoch %d valid' % (epoch))
            #             poststr += 'lr: {:.4f}'.format(lr)
            #             loader.set_postfix_str(poststr)
            #
            #             if j < sample_size:
            #                 # Draw predictions and save them as jpg images.
            #                 debugger = Debugger(dataset=names, down_ratio=cfg.down_ratio)
            #                 reg = output['reg'] if cfg.reg_offset else None
            #                 obj = output['obj'] if cfg.reg_obj else None
            #                 dets = ctdet_decode(
            #                     output['hm'], output['wh'], reg=reg, obj=obj,
            #                     cat_spec_wh=cfg.cat_spec_wh, K=cfg.K)
            #                 dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
            #                 dets[:, :, :4] *= cfg.down_ratio
            #                 dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
            #                 dets_gt[:, :, :4] *= cfg.down_ratio
            #                 for i in range(1):
            #                     img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
            #                     img = np.clip(((img * std + mean) * 255.), 0, 255).astype(np.uint8)
            #                     pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
            #                     gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
            #                     debugger.add_blend_img(img, pred, 'pred_hm')
            #                     debugger.add_blend_img(img, gt, 'gt_hm')
            #                     debugger.add_img(img, img_id='out_pred')
            #                     for k in range(len(dets[i])):
            #                         if dets[i, k, 4] > cfg.vis_thresh:
            #                             debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
            #                                                    dets[i, k, 4], img_id='out_pred')
            #
            #                     debugger.add_img(img, img_id='out_gt')
            #                     for k in range(len(dets_gt[i])):
            #                         if dets_gt[i, k, 4] > cfg.vis_thresh:
            #                             debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
            #                                                    dets_gt[i, k, 4], img_id='out_gt')
            #
            #                     debugger.save_all_imgs(debug_dir, prefix='{}.{}_'.format(epoch, j))
            #             del output, loss, loss_stats
            #     model.train()
            #     logstr = 'epoch {} valid'.format(epoch)
            #     for k, v in val_loss_stats.items():
            #         logstr += ' {}: {:.4f};'.format(k, v.avg)
            #         if USE_TENSORBOARD:
            #             writer.add_scalar('val_{}'.format(k), v.avg, epoch)
            #     logger.info(logstr)
            #     if val_loss_stats['loss'].avg < best:
            #         best = val_loss_stats['loss'].avg
            #         save_model(os.path.join(model_dir, 'model_best.pth'), epoch, net)
            save_model(os.path.join(model_dir, 'model_last.pth'), epoch, net,
                       optimizer)
            # Step the LR down by 10x at each configured milestone epoch and
            # snapshot the model at that point.
            if epoch in cfg.lr_step:
                save_model(
                    os.path.join(model_dir, 'model_{}.pth'.format(epoch)),
                    epoch, net, optimizer)
                lr = cfg.lr * (0.1**(cfg.lr_step.index(epoch) + 1))
                logger.info('Drop LR to {}'.format(lr))
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
Example #32
0
from tempfile import NamedTemporaryFile
from ansible import callbacks
from ansible import utils

from tempfile import NamedTemporaryFile
import ConfigParser
import jinja2
import sys
import os

# Read the coonfig file
CONFIG = ConfigParser.ConfigParser()
CONFIG.read('cloud.ini')

# Simple logger
LOGGER = logger.setup_logger("new_instance")

# Ansible loggers, courtesy of https://serversforhackers.com/running-ansible-programmatically
utils.VERBOSITY = 1
playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
stats = callbacks.AggregateStats()
runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)

# check_pubkey, if it does not exist will create an "ansible" key
instances.check_pubkey(CONFIG.get("instances", "key_name"), CONFIG.get("instances", "pub_key_path"))

# Let's create the server
server = instances.create_server('%s' % (sys.argv[1]),
    CONFIG.get("instances", "flavor"),
    CONFIG.get("instances", "image"),
    CONFIG.get("instances", "key_name")