Example #1
from datetime import datetime
from os.path import join

import numpy as np
import torch
from sklearn.metrics import accuracy_score
from tqdm import tqdm


def perform_test(cluster_name,
                 exp_name,
                 exp_root_dir,
                 model_file_pf,
                 cuda=False):
    # Dataset Configurations
    opt = conf.parse(cluster_name, exp_name)
    dataset = opt['dataset']
    #dataset['workers'] = 0
    dataset['produce'] = ['orig', 'down', 'label']
    dataset['batch_size'] = 30
    loader = get_test_loader(**dataset)

    # Classifier configuration
    netAConf = {
        k: v
        for k, v in opt['netA'].items() if k in ['gpu', 'model', 'resume']
    }
    netAConf['model']['num_classes'] = opt['reset_ac_out']
    #netAConf.update({'resume': join(exp_root_dir, exp_name, '_results/model/chk_ac_{}.pth.tar'.format(model_file_pf))})
    netAConf.update({
        'resume':
        join(exp_root_dir, exp_name,
             'model/chk_ac_{}.pth.tar'.format(model_file_pf))
    })

    # Device selection
    device = torch.device('cuda') if cuda else torch.device('cpu')

    # Instantiate networks
    netA = NetworkWrapper(name='netA', **netAConf).to(device)
    netA.eval()
    if 'netG' in opt:
        netG = NetworkWrapper(name='G', **opt['netG']).to(device)
        netG.eval()
    else:
        netG = None

    results = {}  # {'tube_id':{'<tag>': [predictions], ...}, ...}
    gt_map = {}
    pbar = tqdm(total=len(loader))
    for i, batch in enumerate(loader):
        x, x_down = batch[0].to(device), batch[1].to(device)
        label, tube_ids, tube_tags = batch[2], batch[3], batch[4]

        # Super-resolve the image if G is defined
        if netG is not None:
            gout = netG(x_down)
            rec, loc = (gout[0],
                        gout[1]) if isinstance(gout, tuple) else (gout, None)
            acin = (rec * loc) if opt.get('apply_loc', False) else rec
        else:
            acin = x

        # Renormalize to [0, 1] if requested
        acin = (acin + 1.0) * 0.5 if opt.get('norm_to_01', False) else acin

        # Forward netA
        y = netA(acin)

        for bid in range(y.size(0)):
            tid, tag = tube_ids[bid].item(), tube_tags[bid].item()

            if tid not in gt_map:
                gt_map[tid] = label[bid].numpy()

            if tid not in results:
                results[tid] = {}

            if tag in results[tid]:
                raise Exception(
                    'Duplicate tag found for tid: {} | tag: {}'.format(tid, tag))
            results[tid][tag] = y[bid].cpu().numpy()
        pbar.update(1)
    pbar.close()

    pred_map = post_processing(results)
    ground_truth, predictions = get_gt_prediction_arr(gt_map, pred_map)
    predictions = np.argmax(predictions, axis=1)

    acc = accuracy_score(ground_truth, predictions)

    # Print the final results
    print(exp_name + ':')
    print('    -Accuracy : {:.4f}'.format(acc), flush=True)

    # Write results to the file
    with open(join(exp_root_dir, exp_name, 'log', 'test.txt'),
              'a+') as outfile:
        outfile.write('Exp {} ({})\n-----\n'.format(
            exp_name,
            datetime.now().strftime("%m/%d/%Y, %H:%M:%S")))
        outfile.write('    -Accuracy : {:.4f}\n'.format(acc))
        outfile.write('-' * 100)
        outfile.write('\n')
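
For reference, a minimal invocation sketch. The cluster name, experiment name, root directory, and checkpoint suffix below are hypothetical placeholders, chosen only to match the path layout the function builds:

if __name__ == '__main__':
    # Hypothetical arguments: this resolves the checkpoint
    # <exp_root_dir>/<exp_name>/model/chk_ac_best.pth.tar
    perform_test(cluster_name='local',
                 exp_name='exp001',
                 exp_root_dir='/data/experiments',
                 model_file_pf='best',
                 cuda=torch.cuda.is_available())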
Example #2
            if ('images' in json_res['body']['body']) and (len(json_res['body']['body']['images']) > 0):
                for image in json_res['body']['body']['images']:
                    cur_entity = utils.Entity(creator, post_id, page_url, image['originalUrl'], image['id'] + '.' + image['extension'], title)
                    if cur_indexes.add(cur_entity):
                        utils.info('Found new image {}'.format(cur_entity.image_filename))
                        new_image += 1
            else:
                utils.warning('no image found')
        else:
            utils.error('key "body" not found')
    else:
        utils.error('request failed')

with open('config.json', 'r', encoding='utf-8') as config_f:
    config_dict = json.load(config_f)
conf.parse(config_dict)

os.chdir(conf.work_path)
sess = requests.Session()
sess.proxies = conf.proxies
sess.headers.update({
    'user-agent': conf.ua,
    'cookie': conf.cookie_string,
    'accept': 'application/json',
    'origin': 'https://www.fanbox.cc'
})

cur_indexes = utils.Index('index.json')
request_queue = queue.SimpleQueue()
skipped_post = 0
new_image = 0
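
The dedup logic above relies on `utils.Index.add` returning True only for entities not seen before. `utils.Index` is project-specific and not shown; a minimal sketch of such a persistent index, assuming entries are keyed by `image_filename`:

import json
import os

class Index:
    # Sketch of a persistent dedup index; the real utils.Index may differ.
    def __init__(self, path):
        self.path = path
        self.seen = set()
        if os.path.exists(path):
            with open(path, 'r', encoding='utf-8') as f:
                self.seen = set(json.load(f))

    def add(self, entity):
        # True only if this entity's filename has not been recorded yet.
        if entity.image_filename in self.seen:
            return False
        self.seen.add(entity.image_filename)
        return True

    def save(self):
        with open(self.path, 'w', encoding='utf-8') as f:
            json.dump(sorted(self.seen), f)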
Example #3
import torch
from os.path import join


def create_gifs(cluster_name,
                exp_name,
                exp_root_dir,
                model_file_pf,
                out_dir,
                cuda=False,
                save_rec=True,
                save_orig=False,
                save_up=False,
                save_down=False):
    # Configurations
    opt = conf.parse(cluster_name, exp_name)
    dataset = opt['dataset']
    dataset['produce'] = ['down', 'up', 'orig']
    dataset['batch_size'] = 28
    netGConf = {
        k: v
        for k, v in opt['gan1']['netG'].items()
        if k in ['gpu', 'model', 'resume']
    }
    netGConf.update({
        'resume':
        join(exp_root_dir, exp_name,
             '_results/model/chk_sr_{}.pth.tar'.format(model_file_pf))
    })
    visrange = (-1.0, 1.0) if dataset['norm'] == '-11' else (0.0, 1.0)
    device = torch.device('cuda') if cuda else torch.device('cpu')

    test_loader = get_test_loader(**dataset)
    gen = GeneratorWrapper(name='G', **netGConf).to(device).eval()

    for i, batch in enumerate(test_loader):
        ctx, up = batch[0].to(device), batch[1].to(device)
        gt = batch[2].to(device)
        b, c, t, h, w = gt.size()

        # Inference only: no need to track gradients
        with torch.no_grad():
            rec = gen(ctx)

        vmin, vmax = visrange
        if vmin != 0.0 or vmax != 1.0:
            ctx = (ctx - vmin) / (vmax - vmin)
            up = (up - vmin) / (vmax - vmin)
            gt = (gt - vmin) / (vmax - vmin)
            rec = (rec - vmin) / (vmax - vmin)

        # create gif
        for bid in range(b):
            if save_rec:
                recdir = join(out_dir, dataset['name'],
                              'x' + str(dataset['scale_factor']), exp_name)
                utl.mkdirs(recdir)
                create_gif(rec[bid, :, :, :, :],
                           join(recdir, 'movie{}_{}.gif'.format(i, bid)))

            if save_orig:
                origdir = join(out_dir, dataset['name'], 'orig')
                utl.mkdirs(origdir)
                create_gif(gt[bid, :, :, :, :],
                           join(origdir, 'movie{}_{}.gif'.format(i, bid)))

            if save_down:
                downdir = join(out_dir, dataset['name'],
                               'x' + str(dataset['scale_factor']), 'down')
                utl.mkdirs(downdir)
                create_gif(ctx[bid, :, :, :, :],
                           join(downdir, 'movie{}_{}.gif'.format(i, bid)))

            if save_up:
                updir = join(out_dir, dataset['name'],
                             'x' + str(dataset['scale_factor']), 'up')
                utl.mkdirs(updir)
                create_gif(up[bid, :, :, :, :],
                           join(updir, 'movie{}_{}.gif'.format(i, bid)))
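
`create_gif` is a project helper that is not shown here. A minimal sketch of what it might look like, assuming a (C, T, H, W) float tensor in [0, 1] and using the imageio v2 API for encoding:

import imageio
import torch

def create_gif(video, path, fps=10):
    # Sketch: write a (C, T, H, W) float tensor in [0, 1] as an animated GIF.
    frames = (video.detach().clamp(0, 1) * 255).byte()   # still (C, T, H, W)
    frames = frames.permute(1, 2, 3, 0).cpu().numpy()    # (T, H, W, C) uint8
    imageio.mimsave(path, list(frames), fps=fps)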
Example #4
import sys
import torch
from os.path import join

from deepnn.log.history_logger import HistorySaver
from deepnn.util.avg_meter import AverageMeter
from gan.generator import GeneratorWrapper
from gan.discriminator import DiscriminatorWrapper
from deepnn.nets.network_wrapper import NetworkWrapper
from reader.loader import get_train_loader, get_val_loader
import deepnn.util as utl
import conf

# READ experiment protocol
if len(sys.argv) != 3:
    raise Exception('Usage: python main.py <machine_name> <exp_id>')
machine_name, exp_name = sys.argv[1], sys.argv[2]
#exp_name = '150sa'

opt = conf.parse(machine_name, exp_name)

device = torch.device("cuda" if opt['cuda'] else "cpu")
cpu = torch.device('cpu')
gpu = torch.device('cuda')
visrange = (-1.0, 1.0) if opt['dataset']['norm'] == '-11' else (0.0, 1.0)

# Measurements
psnr = PSNR(peak=1.0)
ssim = SSIM()
accuracy = Accuracy()
f1_score = F1Score()

# Logging and Visualization
se = opt['start_epoch'] if opt['start_epoch'] > 0 else -1
logger = HistorySaver(join(opt['logdir'], 'hist'), se)
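
PSNR(peak=1.0) implies inputs scaled to [0, 1]. The project's metric classes (PSNR, SSIM, Accuracy, F1Score) are not shown; as an assumption of what the PSNR one might compute, a minimal PyTorch sketch:

import torch

class PSNR:
    # Sketch of a PSNR metric; assumes both inputs lie in [0, peak].
    def __init__(self, peak=1.0):
        self.peak = peak

    def __call__(self, x, y):
        mse = torch.mean((x - y) ** 2)
        if mse == 0:
            return float('inf')
        return (10.0 * torch.log10(self.peak ** 2 / mse)).item()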
Example #5
from datetime import datetime
from os.path import join

import numpy as np
import torch
from sklearn.metrics import accuracy_score
from tqdm import tqdm


def perform_test(cluster_name,
                 exp_name,
                 exp_root_dir,
                 model_file_pf,
                 cuda=False):
    # Dataset Configurations
    opt = conf.parse(cluster_name, exp_name)
    dataset = opt['dataset']
    dataset['produce'] = ['orig', 'down', 'label']
    dataset['batch_size'] = 64
    loader = get_test_loader(**dataset)

    # Generator Configuration
    netGConf = {
        k: v
        for k, v in opt['gan1']['netG'].items()
        if k in ['gpu', 'model', 'resume']
    }
    netGConf.update({
        'resume':
        join(exp_root_dir, exp_name,
             'model/gan/chk_gan_{}.pth.tar'.format(model_file_pf))
    })

    # Classifier configuration
    netAConf = {
        k: v
        for k, v in opt['netA'].items() if k in ['gpu', 'model', 'resume']
    }
    if 'reset_ac_out' in opt:
        netAConf['model']['num_classes'] = opt['reset_ac_out']
    #netAConf.update({'resume': join(exp_root_dir, exp_name, '_results/model/chk_ac_{}.pth.tar'.format(model_file_pf))})
    netAConf.update({
        'resume':
        join(exp_root_dir, exp_name,
             'model/ac/chk_ac_{}.pth.tar'.format(model_file_pf))
    })

    # Device selection
    device = torch.device('cuda') if cuda else torch.device('cpu')

    # Instantiate networks
    netA = NetworkWrapper(name='netA', **netAConf).to(device)
    netA.eval()
    netG = NetworkWrapper(name='G', **netGConf).to(device)
    netG.eval()

    meters = MeterCache()
    results = {}  # {'tube_id':{'<tag>': [predictions], ...}, ...}
    gt_map = {}
    pbar = tqdm(total=len(loader))
    for i, batch in enumerate(loader):
        x, x_down = batch[0].to(device), batch[1].to(device)
        label, tube_ids, tube_tags = batch[2], batch[3], batch[4]

        # Super-resolve the image if G is defined
        rec = netG(x_down)

        # Renormalize to [0, 1] if requested
        acin = (rec + 1.0) * 0.5 if opt.get('norm_to_01', False) else rec

        # Forward netA
        y = netA(acin)

        for bid in range(y.size(0)):
            tid, tag = tube_ids[bid].item(), tube_tags[bid].item()

            if tid not in gt_map:
                gt_map[tid] = label[bid].numpy()

            if tid not in results:
                results[tid] = {}

            if tag in results[tid]:
                raise Exception(
                    'Duplicate tag found for tid: {} | tag: {}'.format(tid, tag))
            results[tid][tag] = y[bid].cpu().numpy()

        # Denormalize the outputs so the values lie between 0 and 1
        rec_dn = loader.dataset.denormalize(rec)
        gt_dn = loader.dataset.denormalize(x)

        # PSNR and SSIM
        b, c, t, h, w = x.size()
        rec_batch = (rec_dn.permute(0, 2, 1, 3, 4)
                     .contiguous().view(b * t, c, h, w))
        gt_batch = (gt_dn.permute(0, 2, 1, 3, 4)
                    .contiguous().view(b * t, c, h, w))
        pval = psnr(rec_batch.data, gt_batch.data)
        ssim_val = ssim(rec_batch.data, gt_batch.data)
        metric_map = {
            'psnr': [pval, rec_batch.size(0)],
            'ssim': [ssim_val, rec_batch.size(0)]
        }
        meters.update(**metric_map)
        pbar.update(1)
    pbar.close()

    # Accuracy
    pred_map = post_processing(results)
    ground_truth, predictions = get_gt_prediction_arr(gt_map, pred_map)
    predictions = np.argmax(predictions, axis=1)
    acc = accuracy_score(ground_truth, predictions)

    # Print the final results
    print(exp_name + ':')
    print('    -Accuracy : {:.4f}'.format(acc), flush=True)
    for k, meter in meters.cache.items():
        print('    -{} : {:.4f}'.format(k, meter.avg), flush=True)

    # Write results to the file
    with open(join(exp_root_dir, exp_name, 'log', 'test.txt'),
              'a+') as outfile:
        outfile.write('Exp {} ({})\n-----\n'.format(
            exp_name,
            datetime.now().strftime("%m/%d/%Y, %H:%M:%S")))
        outfile.write('    -Accuracy : {:.4f}\n'.format(acc))
        for k, meter in meters.cache.items():
            outfile.write('    -{} : {:.4f}\n'.format(k, meter.avg))
        outfile.write('-' * 100)
        outfile.write('\n')
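
The permute/view sequence above is what turns a (B, C, T, H, W) video batch into a (B*T, C, H, W) stack of frames, so the image metrics can be applied per frame. As a standalone illustration:

import torch

def frames_from_clips(v):
    # (B, C, T, H, W) -> (B*T, C, H, W): move time next to batch, then flatten.
    b, c, t, h, w = v.size()
    return v.permute(0, 2, 1, 3, 4).contiguous().view(b * t, c, h, w)

clips = torch.rand(2, 3, 4, 8, 8)   # 2 clips, 3 channels, 4 frames of 8x8
assert frames_from_clips(clips).shape == (8, 3, 8, 8)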
Example #6
def __usage_iosim():
    print("""\
Usage: iosim.py [<options>] <trace path>
  <options>
   -h: help (this message)
   -c <size in blks>
   -t <storage type>: all, default(no prefetch, lru), prefetch, ml, rule
   -T <timestamp range>
  <options for rule>
   -p: enable per-process reference history (default: disabled)
   -b <count>: reference history count (default: 1)
  <options for ml>
   -p: enable per-process reference history (default: enabled)
   -G <width(time) x height(lba)>: grid dimension (default: 5x10)
   -u <sec>: width unit for time (default: 0.005)
   -L <lba max>
   -m <model path>: for storage ml only
   -M <model type>: for storage ml only
""")

if __name__ == "__main__":
    from sim import Simulator

    logger.init("iosim")

    sim = Simulator()

    conf.parse(__usage_iosim)
    sim.run()
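
Based on the usage text above, typical invocations look like the following (the trace path and option values are illustrative only):

    python iosim.py -t default -c 4096 trace.log
    python iosim.py -t rule -p -b 2 trace.log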
Example #7
import numpy as np
import torch
from sklearn.metrics import precision_recall_fscore_support


def perform_test(cluster_name,
                 exp_name,
                 exp_root_dir,
                 model_file_pf,
                 cuda=False):
    # Dataset Configurations
    opt = conf.parse(cluster_name, exp_name)
    opt['f1_threshold'] = 0.5
    dataset = opt['dataset']
    #dataset['produce'] = ['orig', 'down', 'up', 'label']
    dataset['batch_size'] = 64
    loader = get_test_loader(**dataset)

    # Device selection
    device = torch.device('cuda') if cuda else torch.device('cpu')

    netG, netA = get_networks(opt, exp_name, exp_root_dir, model_file_pf,
                              device)

    meters = MeterCache()
    predictions, ground_truth = [], []
    for i, batch in enumerate(loader):
        x, hr_gt, label = get_data(exp_name, dataset['produce'], batch, device)

        # Super-resolve the image if G is defined
        rec = netG(x) if netG is not None else x

        # Forward netA
        y = netA(rec)

        # Save predictions for F1 score calculation
        ground_truth.append(label.cpu().data.numpy())
        predictions.append(y.cpu().data.numpy())

        # Denormalize the outputs so the values lie between 0 and 1
        if netG is not None:
            rec_dn = loader.dataset.denormalize(rec)
            gt_dn = loader.dataset.denormalize(hr_gt)

            # PSNR and SSIM
            b, c, t, h, w = hr_gt.size()
            rec_batch = (rec_dn.permute(0, 2, 1, 3, 4)
                         .contiguous().view(b * t, c, h, w))
            gt_batch = (gt_dn.permute(0, 2, 1, 3, 4)
                        .contiguous().view(b * t, c, h, w))
            pval = psnr(rec_batch.data, gt_batch.data)
            ssim_val = ssim(rec_batch.data, gt_batch.data)
            metric_map = {
                'psnr': [pval, rec_batch.size(0)],
                'ssim': [ssim_val, rec_batch.size(0)]
            }
            meters.update(**metric_map)

    # Print the final results
    print(exp_name + ':')

    # F1 score calculation
    ground_truth = np.concatenate(ground_truth, axis=0)
    predictions = np.concatenate(predictions, axis=0)
    print(ground_truth.shape, predictions.shape)
    predictions = (predictions > opt['f1_threshold']).astype(int)
    ground_truth = (ground_truth > opt['f1_threshold']).astype(int)
    precision, recall, f1_scores, _ = precision_recall_fscore_support(
        ground_truth, predictions, average=None)
    f1_mean, prec_mean, rec_mean = (np.mean(f1_scores), np.mean(precision),
                                    np.mean(recall))
    print('Test F1-Score:')
    print(str(f1_scores))
    print('Test, [F1-Score: %.4f] [Precision: %.4f] [Recall: %.4f]' %
          (f1_mean, prec_mean, rec_mean))

    for k, meter in meters.cache.items():
        print('    -{} : {:.2f}'.format(k, meter.avg), flush=True)
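
With average=None, precision_recall_fscore_support returns one value per class, and the means printed above are unweighted macro averages. A toy multi-label illustration of the same thresholding-then-scoring pattern:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

scores_true = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.6]])
scores_pred = np.array([[0.8, 0.3], [0.1, 0.9], [0.4, 0.7]])
y_true = (scores_true > 0.5).astype(int)   # multi-hot ground truth
y_pred = (scores_pred > 0.5).astype(int)
prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average=None)
print(f1, f1.mean())   # per-class F1, then the macro mean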