Example #1
import os

# BaseTask, Config, ProjectComparator, Reporter, Scrapper and Comparator
# are assumed to be project-local imports from the surrounding package.


class TaskLoader(BaseTask):

    def __init__(self, logger, project_input_dir, output_dir, process_id,
                 core_call_type, comp_type, max_error_threshold):
        self.logger = logger
        self.project_input_dir = project_input_dir
        self.output_dir = output_dir
        self.process_id = process_id

        self.config = Config('core/config.ini')

        # Needed by start_task()/stop_task() for both call types, so set
        # unconditionally rather than only in the 'db' branch.
        self.comp_type = comp_type
        self.max_error_threshold = max_error_threshold
        self.total_error_count = 0

        if core_call_type == 'core':
            # Input files come from the static config file.
            self.platform_import_file = self.config.get_config_value('input_files', 'platform_import_file')
            self.url_to_scrape_file = self.config.get_config_value('input_files', 'url_to_scrape_file')
            self.field_to_xpath_file = self.config.get_config_value('input_files', 'field_to_xpath_file')
            # Output files.
            self.scrapped_data_file = self.config.get_config_value('output_files', 'scrapped_output_csv')
            self.comparision_report_file = self.config.get_config_value('output_files', 'comparison_report')

        elif core_call_type == 'db':
            try:
                # Input and output file names come from the database record.
                project_comparator_obj = ProjectComparator.objects.get(id=process_id)
                self.platform_import_file = os.path.join(self.project_input_dir, project_comparator_obj.project_platform_import_file)
                self.url_to_scrape_file = os.path.join(self.project_input_dir, project_comparator_obj.project_url_file)
                self.field_to_xpath_file = os.path.join(self.project_input_dir, project_comparator_obj.project_xpath_file)
                self.scrapped_data_file = project_comparator_obj.project_scrapper_output_file
                self.comparision_report_file = project_comparator_obj.project_report_file
            except Exception:
                # e.g. no record, or more than one record, exists for
                # process_id; log it instead of failing silently.
                self.logger.exception('ComparisonId = {} : could not load ProjectComparator record'.format(process_id))

    def start_task(self):
        # Start the scraping task.
        if self.comp_type == 1:
            # Immediate comparison task.
            self.reporter = Reporter(self.process_id, self.url_to_scrape_file,
                                     self.field_to_xpath_file, self.platform_import_file,
                                     self.scrapped_data_file, self.comparision_report_file,
                                     self.logger, self.output_dir, self.comp_type,
                                     self.max_error_threshold)
            self.reporter.start_task()
            self.reporter.stop_task()
            self.total_error_count = self.max_error_threshold

        else:
            # Bulk comparison task: scrape first, then compare.
            self.scrapper = Scrapper(self.process_id, self.url_to_scrape_file,
                                     self.field_to_xpath_file, self.scrapped_data_file,
                                     self.logger, self.output_dir, self.comp_type)
            self.logger.info('*****************************')
            self.scrapper.start_task()
            self.scrapper.stop_task()
            self.logger.info('*****************************')

            # Comparison task: run only if the scraper produced its output csv.
            if os.path.exists(os.path.join(self.output_dir, self.scrapped_data_file)):
                self.comparator = Comparator(self.process_id, self.platform_import_file,
                                             self.scrapped_data_file, self.comparision_report_file,
                                             self.logger, self.output_dir, self.scrapper)
                self.logger.info('*****************************')
                self.comparator.start_task()
                self.total_error_count = self.comparator.stop_task()
                self.logger.info('*****************************')
            else:
                self.logger.info('ComparisonId = {} : Output csv missing. Comparison not performed!'.format(self.process_id))

    def stop_task(self):
        self.logger.info('*****************************')
        self.logger.info('ComparisonId = {} : Closing Comparator!'.format(self.process_id))
        self.logger.info('*****************************')
        return self.total_error_count
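
For context, a minimal usage sketch, assuming Reporter, Scrapper and Comparator exist in the surrounding project; the directory names, process id, comparison type and threshold below are hypothetical placeholders, not values from the original code:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('comparator')

task = TaskLoader(
    logger=logger,
    project_input_dir='input',    # hypothetical directory layout
    output_dir='output',
    process_id=1,                 # hypothetical ProjectComparator primary key
    core_call_type='db',          # 'core' would read paths from core/config.ini
    comp_type=0,                  # 0 = bulk scrape + compare, 1 = immediate report
    max_error_threshold=10,
)
task.start_task()
logger.info('Total errors: %s', task.stop_task())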
Example #2
import os

import numpy as np
import torch
import torch.backends.cudnn as cudnn

# parse_args, Config, datasets, models, Optim, EvalPSNR,
# DataParallelwithSyncBN, train, validate and save_checkpoint are
# project-local helpers assumed to be importable from the surrounding repo.
best_PSNR = 0  # module-level global updated inside main()


def main():
    global cfg, best_PSNR
    args = parse_args()
    cfg = Config.from_file(args.config)

    # Restrict CUDA to the devices listed in the config.
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
        str(gpu) for gpu in cfg.device)
    cudnn.benchmark = True
    cudnn.fastest = True

    if hasattr(datasets, cfg.dataset):
        ds = getattr(datasets, cfg.dataset)
    else:
        raise ValueError('Unknown dataset ' + cfg.dataset)

    model = getattr(models, cfg.model.name)(cfg.model).cuda()
    cfg.train.input_mean = model.input_mean
    cfg.train.input_std = model.input_std
    cfg.test.input_mean = model.input_mean
    cfg.test.input_std = model.input_std

    # Data loading code. The training loader must exist because max_iter
    # below is derived from its length.
    train_loader = torch.utils.data.DataLoader(
        ds(cfg.train),
        batch_size=cfg.train.batch_size,
        shuffle=True,
        num_workers=0,  # raise (e.g. to 32) for real training runs
        pin_memory=True,
        drop_last=True)

    val_loader = torch.utils.data.DataLoader(
        datasets.UCF101(cfg.test, False),
        batch_size=cfg.test.batch_size,
        shuffle=False,
        num_workers=0,  # raise (e.g. to 32) for real training runs
        pin_memory=True)

    cfg.train.optimizer.args.max_iter = (
        cfg.train.optimizer.args.max_epoch * len(train_loader))

    policies = model.get_optim_policies()
    for group in policies:
        print('group: {} has {} params, lr_mult: {}, decay_mult: {}'.format(
            group['name'], len(group['params']),
            group['lr_mult'], group['decay_mult']))
    optimizer = Optim(policies, cfg.train.optimizer)

    if cfg.resume or cfg.weight:
        checkpoint_path = cfg.resume if cfg.resume else cfg.weight
        if os.path.isfile(checkpoint_path):
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path)
            # strict=False tolerates missing or unexpected keys in the weights.
            model.load_state_dict(checkpoint['state_dict'], strict=False)
            if cfg.resume:
                optimizer.load_state_dict(checkpoint['grad_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(checkpoint_path))

    model = DataParallelwithSyncBN(
        model, device_ids=range(len(cfg.device))).cuda()

    # Define the loss function (criterion) and the PSNR evaluator.
    criterion = torch.nn.MSELoss().cuda()
    evaluator = EvalPSNR(255.0 / np.mean(cfg.test.input_std))

    for epoch in range(cfg.train.optimizer.args.max_epoch):
        # Train for one epoch.
        train(train_loader, model, optimizer, criterion, epoch)

        # Evaluate on the validation set at the configured frequency,
        # and always on the final epoch.
        if ((epoch + 1) % cfg.logging.eval_freq == 0
                or epoch == cfg.train.optimizer.args.max_epoch - 1):
            PSNR = validate(val_loader, model, optimizer, criterion, evaluator)
            # Remember the best PSNR and save a checkpoint.
            is_best = PSNR > best_PSNR
            best_PSNR = max(PSNR, best_PSNR)
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': dict(cfg),
                'state_dict': model.module.state_dict(),
                'grad_dict': optimizer.state_dict(),
                'best_PSNR': best_PSNR,
            }, is_best)
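
save_checkpoint itself is not shown in the snippet; below is a minimal sketch following the common PyTorch pattern (serialize the state, copy it aside when it is the best so far). The file names are assumptions, not necessarily the project's actual paths:

import shutil
import torch

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # Assumed helper: the original project's version may name files
    # differently or embed the epoch in the path.
    torch.save(state, filename)
    if is_best:
        # Keep a separate copy of the best-scoring checkpoint.
        shutil.copyfile(filename, 'model_best.pth.tar')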
Example #3
def __init__(self, logger):
    # Constructor of a session-manager class: loads the configuration,
    # keeps a per-session map, and derives a child logger from the one
    # passed in.
    self.config = Config()
    self.sessions = {}
    self.parent_logger = logger
    self.logger = logger.get_child('session_manager')
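
The get_child() call belongs to the project's own logger API, not the stdlib. For illustration, a hypothetical adapter that provides it on top of logging.Logger.getChild (the class name and delegation behavior are assumptions):

import logging


class ChildLogger:
    # Hypothetical wrapper giving a stdlib logger the get_child() method
    # this snippet expects; the real project's logger API may differ.

    def __init__(self, logger):
        self._logger = logger

    def get_child(self, name):
        return ChildLogger(self._logger.getChild(name))

    def __getattr__(self, attr):
        # Delegate info/debug/warning/... to the wrapped stdlib logger.
        return getattr(self._logger, attr)


logging.basicConfig(level=logging.INFO)
root = ChildLogger(logging.getLogger('app'))
child = root.get_child('session_manager')
child.info('session manager ready')  # logs under 'app.session_manager'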