Example #1
def gen_traffic(self, url, page_source, response_headers):
    if response_headers is None:
        response_headers = {}
    if self.browser == 'chrome':
        request = HttpRequest(method='GET',
                              url=url,
                              headers=Traffic_generator.DEFAULT_HEADER,
                              body='')
        response = HttpResponse(code='200',
                                reason='OK',
                                headers=response_headers,
                                data=page_source)
        return (request, response)
    # PhantomJS results get pickled, and pickling fails unless the
    # headers are plain strings, so serialize them with dict2str.
    elif self.browser == 'phantomjs':
        request = HttpRequest(method='GET',
                              url=url,
                              headers=dict2str(
                                  Traffic_generator.DEFAULT_HEADER),
                              body='')
        response = HttpResponse(code='200',
                                reason='OK',
                                headers=dict2str(response_headers),
                                data=page_source)
        return (request, response)
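Every example on this page feeds a dict through a helper named dict2str and uses the resulting string (as picklable headers above, as a loggable config dump in the examples below). The helper itself is not shown anywhere on this page; the following is only a minimal sketch that matches its call shape (dict in, str out), and its exact formatting is an assumption:

def dict2str(d, indent=0):
    # Hypothetical stand-in: the real dict2str behind these examples is
    # not shown here; only its call shape (one dict in, one str out) is.
    lines = []
    for key, value in d.items():
        if isinstance(value, dict):
            # Recurse into nested dicts, indenting one level deeper.
            lines.append(' ' * indent + str(key) + ':')
            lines.append(dict2str(value, indent + 2))
        else:
            lines.append(' ' * indent + '{}: {}'.format(key, value))
    return '\n'.join(lines)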
Example #2
def init(f):
    global LOG_FOUT
    global MODEL
    global MODEL_CONF
    global TRAIN_FILES
    global TEST_FILES

    # IMPORT network module
    module = importlib.import_module(f['model'])
    MODEL = getattr(module, f['model'])
    MODEL_CONF = getattr(module, f['model'] + '_conf')
    MODEL_FILE = os.path.join(BASE_DIR, 'models', f['model'] + '.py')

    # MAKE log directory
    if not os.path.exists(f['log_dir']):
        os.mkdir(f['log_dir'])
    os.system('cp %s %s' % (MODEL_FILE, f['log_dir']))  # back up the model definition
    # CREATE log file
    LOG_FOUT = open(os.path.join(f['log_dir'], 'log_train.txt'), 'a')
    LOG_FOUT.write(dict2str(f))

    # GET dataset files' list
    TRAIN_FILES = provider.getDataFiles(
        os.path.join(f['dataset_path'], 'train_files.txt'))
    TEST_FILES = provider.getDataFiles(
        os.path.join(f['dataset_path'], 'test_files.txt'))
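The shape of the config dict this init expects can be read off its key lookups. A hedged illustration of a matching config (every name and path below is a placeholder, not a value from the original project):

# Hypothetical config; the keys mirror the accesses in init() above.
f = {
    'model': 'pointnet_cls',          # models/pointnet_cls.py must define
                                      # pointnet_cls and pointnet_cls_conf
    'log_dir': 'log',                 # created if it does not exist
    'dataset_path': 'data/modelnet',  # contains train_files.txt and test_files.txt
}
init(f)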
Example #3
def init(f):
    global LOG_FOUT
    global DUMP_FILES
    global TEST_FILES

    # MAKE log directory
    if not os.path.exists(f['visual_dir']):
        os.mkdir(f['visual_dir'])
    # CREATE log file
    LOG_FOUT = open(os.path.join(f['visual_dir'], 'log_visualization.txt'),
                    'w')
    LOG_FOUT.write(dict2str(f))

    DUMP_FILES = provider.getDataFiles(
        os.path.join(BASE_DIR, f['dump_dir'], 'list_evaluate.txt'))
    TEST_FILES = provider.getDataFiles(
        os.path.join(BASE_DIR, f['dataset_path'], 'test_files.txt'))
Example #4
def init(f):
    global LOG_FOUT
    global MODEL
    global MODEL_CONF
    global TEST_FILES
    global FLIST_FOUT

    # IMPORT network module
    sys.path.append(f['model_path'])
    module = importlib.import_module(f['model'])
    MODEL = getattr(module, f['model'])
    MODEL_CONF = getattr(module, f['model'] + '_conf')

    # MAKE log directory
    if not os.path.exists(f['dump_dir']):
        os.mkdir(f['dump_dir'])
    # CREATE log file
    LOG_FOUT = open(os.path.join(f['dump_dir'], 'log_evaluate.txt'), 'w')
    LOG_FOUT.write(dict2str(f))
    FLIST_FOUT = open(os.path.join(f['dump_dir'], 'list_evaluate.txt'), 'w')

    TEST_FILES = provider.getDataFiles(
        os.path.join(BASE_DIR, f['dataset_path'], 'test_files.txt'))
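Examples 2 and 4 resolve the network and its companion <model>_conf object by string with the same importlib idiom. The pattern in isolation, with made-up names for illustration:

import importlib

# Hypothetical module pointnet_cls.py defining both attributes.
module = importlib.import_module('pointnet_cls')
MODEL = getattr(module, 'pointnet_cls')            # network class or builder
MODEL_CONF = getattr(module, 'pointnet_cls_conf')  # its configuration object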
Example #5
def main(config):
	device = torch.device(config['device'])

	##### Setup Dirs #####
	experiment_dir = config['path']['experiments'] + config['name']
	util.mkdir_and_rename(experiment_dir)  # rename the experiment folder if it already exists
	util.mkdirs((experiment_dir+'/sr_images', experiment_dir+'/lr_images'))

	##### Setup Logger #####
	logger = util.Logger('test', experiment_dir, 'test_' + config['name'])

	##### Print Experiment Config #####
	logger.log(util.dict2str(config))
	
	###### Load Dataset #####
	testing_data_loader = dataset.get_test_sets(config['dataset'], logger)

	trainer = create_model(config, logger)
	trainer.print_network_params(logger)

	total_avg_psnr = 0.0
	total_avg_ssim = 0.0

	for name, test_set in testing_data_loader.items():
		logger.log('Testing Dataset {:s}'.format(name))
		valid_start_time = time.time()
		avg_psnr = 0.0
		avg_ssim = 0.0
		idx = 0
		for i, batch in enumerate(test_set):
			idx += 1
			img_name = batch[2][0][batch[2][0].rindex('/')+1:]
			# print(img_name)
			img_name = img_name[:img_name.index('.')]
			img_dir_sr = experiment_dir+'/sr_images'
			img_dir_lr = experiment_dir+'/lr_images'
			util.mkdir(img_dir_sr)
			infer_time = trainer.test(batch)
			visuals = trainer.get_current_visuals()
			lr_img = util.tensor2img(visuals['LR'])
			sr_img = util.tensor2img(visuals['SR'])  # uint8
			gt_img = util.tensor2img(visuals['HR'])  # uint8
			save_sr_img_path = os.path.join(img_dir_sr, '{:s}.png'.format(img_name))
			save_lr_img_path = os.path.join(img_dir_lr, '{:s}.png'.format(img_name))
			util.save_img(lr_img, save_lr_img_path)
			util.save_img(sr_img, save_sr_img_path)
			crop_size = config['dataset']['scale']
			psnr, ssim = util.calc_metrics(sr_img, gt_img, crop_size)
			#logger.log('[ Image: {:s}  PSNR: {:.4f} SSIM: {:.4f} Inference Time: {:.8f}]'.format(img_name, psnr, ssim, infer_time))
			avg_psnr += psnr
			avg_ssim += ssim
		avg_psnr = avg_psnr / idx
		avg_ssim = avg_ssim / idx
		valid_t = time.time() - valid_start_time
		logger.log('[ Set: {:s} Time:{:.3f}] PSNR: {:.2f} SSIM {:.4f}'.format(name, valid_t, avg_psnr, avg_ssim))
		

		total_avg_ssim += avg_ssim
		total_avg_psnr += avg_psnr

	total_avg_ssim /= len(testing_data_loader)
	total_avg_psnr /= len(testing_data_loader)
	
	logger.log('[ Total Average of Sets ] PSNR: {:.2f} SSIM {:.4f}'.format(total_avg_psnr, total_avg_ssim))
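A sketch of how this main might be driven from a config file; the JSON format and file path are assumptions, and the config only has to supply the keys that main actually reads (device, name, path['experiments'], dataset):

import json

if __name__ == '__main__':
    # Hypothetical config file; its layout must provide the keys
    # that main() reads above.
    with open('configs/test.json') as fh:
        config = json.load(fh)
    main(config)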