Code example #1
def render(params):
    """ Starts a test run and renders the output without updating the agent.

        Parameters
        ----------
        params : dict
            Dictionary that contains all experiment settings.
    """
    # Init Runner
    runner = Runner(run=1, params=params)
    runner.render_episode()
    # delete directory
    print('Removing experiment logs from {}'.format(params['exp_dir']))
    shutil.rmtree(params['exp_dir'])
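A minimal, hypothetical way to call this helper (a sketch, not part of the original project): render() itself only reads params['exp_dir'], so every other key below is an assumed, framework-specific placeholder.

# Hypothetical settings dictionary; only 'exp_dir' is used by render() directly,
# the remaining keys stand in for whatever the surrounding Runner expects.
params = {
    'exp_dir': 'experiments/demo_run',   # log directory that render() removes afterwards
    'env_name': 'CartPole-v1',           # assumed, Runner-specific
    'random_seed': 0,                    # assumed, Runner-specific
}
render(params)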
Code example #2
File: scan_view.py  Project: kForth/ClooneyScanner
    def submit_scan(self):
        if self.img is None:
            return
        self.enable_inputs([])

        edited_data = {}
        for r in range(self.data_preview.model().rowCount()):
            key = self.data_preview.model().index(r, 0).data()
            if key in self.fields.keys() and self.fields[key]['type'] in ['HorizontalOptions', 'Boolean']:
                value = self.data_preview.cellWidget(r, 2).currentText()
            elif key in ['pos']:
                value = self.data_preview.cellWidget(r, 2).currentText()
                if value not in self.scanner.POSITIONS:
                    self.enable_inputs()
                    return
                value = self.scanner.POSITIONS.index(value)
            else:
                value = self.data_preview.model().index(r, 2).data()
            data_type = self.data_types[key]
            data_type_name = data_type.__name__
            edited_data[key] = eval('{0}("{1}")'.format(data_type_name, value), {"__builtins__": {data_type_name: data_type}})
            edited_data["filename"] = self.filename

        data_errors = self.check_data(edited_data)
        if data_errors:
            self.errors = data_errors
            self.set_filepath_label_text(json.dumps(self.errors))
            self.enable_inputs()
            return
        try:
            data = json.load(open(self.data_filepath))
        except:
            data = []

        data.append(edited_data)
        json.dump(data, open(self.data_filepath, "w+"))

        data = {
            'filename': self.filename,
            'data': edited_data,
            'team': int(edited_data["team_number"]),
            'match': int(edited_data["match"]),
            'pos': int(edited_data["pos"]),
            'event': self.event_id
        }

        def post_func():
            try:
                requests.post('http://' + self.clooney_host + '/api/sql/add_entry', json=data)
            except Exception as ex:
                print(ex)
        Runner(target=post_func).run()
        self.generator_runner.run()

        shutil.move(self.scan_dir.strip('\\') + self.filename, self.scan_dir + "Processed/" + self.filename)
        cv2.imwrite(self.scan_dir + "Marked/" + self.filename, self.img)
        self.get_new_scan()
        self.enable_inputs()
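Because the eval call above exposes only the recorded constructor through __builtins__, it is essentially a typed cast. A rough equivalent without eval (a sketch, assuming every cell value is something the constructor accepts) would be:

# Sketch: cast the cell value directly with the recorded type,
# e.g. int('42'), float('1.5'), or str(...).
edited_data[key] = data_type(value)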
Code example #3
def main():
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    logger = logging.getLogger(__name__)
    args = get_args()
    print_args(args)

    device, n_gpu = initialization.init_cuda_from_args(args, logger=logger)
    initialization.init_seed(args, n_gpu=n_gpu, logger=logger)

    initialization.init_output_dir(args)
    initialization.save_args(args)

    classifier = simple_classifier(n_classes=args.n_classes, n_hidden=args.fc_dim)
    classifier = classifier.to(device)

    optimizer = SGD(classifier.parameters(), lr=0.001, momentum=0.9)
    runner = Runner(classifier=classifier,
                    optimizer=optimizer,
                    device=device,
                    rparams=RunnerParameters(
                        num_train_epochs=args.num_train_epochs,
                        train_batch_size=args.train_batch_size,
                        eval_batch_size=args.eval_batch_size,
                    ))

    # dataset
    train_dataset = torch.load(os.path.join(args.data_dir, "train.dataset"))
    eval_dataset = torch.load(os.path.join(args.data_dir, "dev.dataset"))
    if args.mnli:
        mm_eval_dataset = torch.load(os.path.join(args.data_dir, "mm_dev.dataset"))
    else:
        mm_eval_dataset = None

    # run training and validation
    to_save = runner.run_train_val(
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        mm_eval_set=mm_eval_dataset,
    )

    # save training state to output dir.
    torch.save(to_save, os.path.join(args.output_dir, "training.info"))
Code example #4
def train(params, run):
    """ Starts training run.

        Parameters
        ----------
        params : dict
            Dictionary that contains all experiment settings.
        run: int
            Number of training run in experiment.

        Returns
        -------
        base_dir : Path
            Returns base directory path of run when complete.
    """
    # Init Runner
    runner = Runner(run=run, params=params)
    # Run 
    runner.run_experiment()
    return runner.get_base_dir()
Code example #5
def test(params):
    """ Starts a test run and renders the output without updating the agent.

        Parameters
        ----------
        params : dict
            Dictionary that contains all experiment settings.
    """
    # Init Runner
    params['random_seed'] = range(params['test_episodes'])
    rewards = []
    for episode in range(1, params['test_episodes']+1):
        runner = Runner(run=episode, params=params)
        episode_results = runner.run_episode('test')
        rewards.append(episode_results['reward'])
        print('Test: {:3d}, Reward: {:.2f}, Current Avg Reward: {:.2f}'.format(episode, episode_results['reward'], sum(rewards) / episode))
    reward_avg = sum(rewards) / params['test_episodes']
    print()
    print('Avg Reward per episode: {:.2f} (after {:d} test episodes)'.format(reward_avg, params['test_episodes']))
    print('Removing experiment logs from {}'.format(params['exp_dir']))
    shutil.rmtree(params['exp_dir'])
Code example #6
File: run.py  Project: mszulc913/acerac
def main():
    # parse_known_args() returns (namespace, list of unrecognised tokens);
    # calling parse_args() first would abort on unknown arguments before this
    # check could run, and vars() cannot be applied to that list.
    cmd_parameters, unknown_args = parser.parse_known_args()
    if len(unknown_args):
        print("Not recognized arguments: ", str(unknown_args))
        return

    parameters = {k: v for k, v in vars(cmd_parameters).items() if v is not None}
    parameters.pop('env_name')
    evaluate_time_steps_interval = parameters.pop('evaluate_time_steps_interval')
    num_evaluation_runs = parameters.pop('num_evaluation_runs')
    max_time_steps = parameters.pop('max_time_steps')
    no_checkpoint = parameters.pop('no_checkpoint')
    no_tensorboard = parameters.pop('no_tensorboard')
    record_time_steps = parameters.pop('record_time_steps', None)
    experiment_name = parameters.pop('experiment_name')
    algorithm = parameters.pop('algo')
    log_dir = parameters.pop('log_dir')
    synchronous = parameters.pop('synchronous')

    runner = Runner(
        environment_name=cmd_parameters.env_name,
        algorithm=algorithm,
        algorithm_parameters=parameters,
        num_parallel_envs=cmd_parameters.num_parallel_envs,
        log_dir=log_dir,
        max_time_steps=max_time_steps,
        num_evaluation_runs=num_evaluation_runs,
        evaluate_time_steps_interval=evaluate_time_steps_interval,
        experiment_name=experiment_name,
        asynchronous=not synchronous,
        log_tensorboard=not no_tensorboard,
        do_checkpoint=not no_checkpoint,
        record_time_steps=record_time_steps
    )

    runner.run()
Code example #7
    def _start_test(self,
                    test_func,
                    need_cleanup=False,
                    full_matrix=True,
                    max_cases=None,
                    only_doc=True):
        title = self._get_func_name(test_func)

        self.params.doc_logger = self.params.case_logger
        self.params.logger.info("=" * 8 + " %s " % title + "=" * 8)
        self.params.logger.info("")
        case_index = 1

        # create runner
        runner = Runner(self.params, self.checkpoints, self.doc_funcs,
                        self.params.logger, self.params.doc_logger)
        extra_handler = self._load_extra_handler(runner)

        # generate test case
        with time_log('Compute case permutations'):
            # TODO: is it a good idea to use the handler to generate cases?
            case_matrix = sorted(
                list(
                    extra_handler.gen_cases(test_func,
                                            need_cleanup=need_cleanup)))

        LOGGER.info('Find %d valid cases', len(case_matrix))

        # training part
        # self._training(case_matrix, test_func)
        # return

        # TODO use a class to be a cases container
        extra_cases = {}
        while case_matrix:
            case = case_matrix.pop(0)
            new_extra_cases, is_mist = runner.run_case(case,
                                                       case_index,
                                                       test_func,
                                                       need_cleanup,
                                                       only_doc=only_doc)
            if not full_matrix and not is_mist:
                break
            for mist_name, cases in new_extra_cases.items():
                extra_cases.setdefault(mist_name, []).extend(cases)
            case_index += 1
            if max_cases and case_index > max_cases:
                break
Code example #8
def main():
    c = Config()
    g = Github(c.token)
    r = Runner(c, g)
    r.run()
Code example #9
    def __init__(self, profile=None, runner_path=None):
        self.profile = profile
        self.__runner = Runner(runner_path)
Code example #10
    logger.setLevel(logging.INFO)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(
        args)
    model = get_model(args, dataset_cls, embedding)

    if args.model == 'sif':
        model.populate_word_frequency_estimation(train_loader)

    total_params = 0
    for param in model.parameters():
        size = [s for s in param.size()]
        total_params += np.prod(size)
    logger.info('Total number of parameters: %s', total_params)

    loss_fn, metrics, y_to_score, resolved_pred_to_score = get_dataset_configurations(
        args)

    optimizer = O.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=args.lr,
                       weight_decay=args.regularization)
    runner = Runner(model, loss_fn, metrics, optimizer, y_to_score,
                    resolved_pred_to_score, args.device, None)
    runner.run(args.epochs, train_loader, dev_loader, test_loader,
               args.log_interval)
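The parameter-count loop above multiplies out each tensor's shape by hand; assuming model is a regular torch.nn.Module as in the snippet, the same total can be obtained with PyTorch's built-in Tensor.numel() (a sketch, not the snippet's original code):

# Sketch: equivalent parameter count via numel()
total_params = sum(p.numel() for p in model.parameters())
logger.info('Total number of parameters: %s', total_params)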
Code example #11
def test_profiles(config_manager, repo_manager, monkeypatch, taker_app):
    config_manager.user_config(CONFIG_NAME).open(
        'w', encoding='utf8').write('''
[compiler]
time-limit = 3.0
memory-limit = 100.0

[checker]
time-limit = 6.0
memory-limit = 200.0

[validator]
time-limit = 9.0
memory-limit = 300.0

[generator]
time-limit = 12.0
memory-limit = 400.0

[custom]
time-limit = 15.0
memory-limit = 500.0
''')

    runner = Runner()
    repo = repo_manager.repo

    compiler_profile = create_profile('compiler', repo)
    assert type(compiler_profile) is CompilerRunProfile
    compiler_profile.update_runner(runner)
    assert runner.parameters.time_limit == 3.0
    assert runner.parameters.memory_limit == 100.0

    checker_profile = create_profile('checker', repo)
    assert type(checker_profile) is CheckerRunProfile
    checker_profile.update_runner(runner)
    assert runner.parameters.time_limit == 6.0
    assert runner.parameters.memory_limit == 200.0

    validator_profile = create_profile('validator', repo)
    assert type(validator_profile) is ValidatorRunProfile
    validator_profile.update_runner(runner)
    assert runner.parameters.time_limit == 9.0
    assert runner.parameters.memory_limit == 300.0

    generator_profile = create_profile('generator', repo)
    assert type(generator_profile) is GeneratorRunProfile
    generator_profile.update_runner(runner)
    assert runner.parameters.time_limit == 12.0
    assert runner.parameters.memory_limit == 400.0

    with pytest.raises(KeyError):
        create_profile('custom', repo)

    register_profile(CustomRunProfile)
    custom_profile = create_profile('custom', repo)
    assert type(custom_profile) is CustomRunProfile
    custom_profile.update_runner(runner)
    assert runner.parameters.time_limit == 15.0
    assert runner.parameters.memory_limit == 500.0

    run_count = 0

    def run(self):
        nonlocal run_count
        run_count += 1

    monkeypatch.setattr(Runner, 'run', run)
    profiled_runner = ProfiledRunner(generator_profile)
    profiled_runner.stdin = 'some input'
    in_runner = profiled_runner._ProfiledRunner__runner
    profiled_runner.run([fspath(taker_app)])
    assert run_count == 1
    assert in_runner.parameters.time_limit == 12.0
    assert in_runner.parameters.memory_limit == 400.0
    assert not in_runner.pass_stdin
    assert not in_runner.capture_stdout
    assert in_runner.capture_stderr
    assert in_runner.stdin == 'some input'
    assert in_runner.stdin == profiled_runner.stdin
    assert in_runner.parameters.working_dir == taker_app.parent

    profiled_runner.run([fspath(taker_app)], repo_manager.task_dir)
    assert run_count == 2
    assert in_runner.parameters.working_dir == repo_manager.task_dir

    profiles = set(list_profiles())
    assert 'compiler' in profiles
    assert 'validator' in profiles
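The profiled_runner._ProfiledRunner__runner lookup above relies on Python's name mangling of double-underscore attributes; a minimal standalone illustration (the class name here is made up, only the mangling rule matters):

class Outer:
    def __init__(self):
        self.__inner = 'hidden'   # stored on the instance as _Outer__inner

obj = Outer()
assert obj._Outer__inner == 'hidden'   # external code must spell out the mangled name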
Code example #12
def main():
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)
    logger = logging.getLogger(__name__)
    args = get_args()
    # print_args(args)

    device, n_gpu = initialization.init_cuda_from_args(args, logger=logger)
    initialization.init_seed(args, n_gpu=n_gpu, logger=logger)

    initialization.init_output_dir(args)
    initialization.save_args(args)

    if args.use_individual:
        classifier = simple_classifier_individual(n_classes=args.n_classes,
                                                  n_hidden=args.fc_dim)
    else:
        classifier = simple_classifier(n_classes=args.n_classes,
                                       n_hidden=args.fc_dim)
    classifier = classifier.to(device)

    optimizer = SGD(classifier.parameters(), lr=0.001, momentum=0.9)
    runner = Runner(classifier=classifier,
                    optimizer=optimizer,
                    device=device,
                    rparams=RunnerParameters(
                        num_train_epochs=args.num_train_epochs,
                        train_batch_size=args.train_batch_size,
                        eval_batch_size=args.eval_batch_size,
                    ),
                    use_individual=args.use_individual)

    # dataset
    train_dataset = torch.load(os.path.join(args.data_dir, "train.dataset"))
    eval_dataset = torch.load(os.path.join(args.data_dir, "dev.dataset"))
    test_dataset = torch.load(os.path.join(args.data_dir, "test.dataset"))

    # run train and validation with state dicts returned
    if args.mnli:
        eval_info, state_dicts = runner.run_train_val_with_state_dict_returned(
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            mm_eval_set=torch.load(
                os.path.join(args.data_dir, "mm_dev.dataset")))
    else:
        eval_info, state_dicts = runner.run_train_val_with_state_dict_returned(
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
        )

    torch.save(eval_info, os.path.join(args.output_dir, "training.info"))

    # find highest validation results, load model state dict, and then run prediction @ test set.
    val_acc = []
    mm_val_acc = []
    if args.mnli:
        for item in eval_info:
            val_acc.append(item[0]['accuracy'])
            mm_val_acc.append(item[1]['accuracy'])
        idx = val_acc.index(max(val_acc))
        print("highest accuracy on validation is: {}, index = {}. \n"
              "mis-matched is: {} \n"
              "Load state dicts and run testing...".format(
                  val_acc[idx], idx, mm_val_acc[idx]))
    else:
        for item in eval_info:
            val_acc.append(item['accuracy'])
        idx = val_acc.index(max(val_acc))
        print("highest accuracy on validation is: {}, index = {}. \n"
              "Load state dicts and run testing...".format(val_acc[idx], idx))

    torch.save(state_dicts[idx], os.path.join(args.output_dir, "state.p"))

    runner.classifier.load_state_dict(state_dicts[idx])
    logits = runner.run_test(test_dataset)

    df = pd.DataFrame(logits)
    df.to_csv(os.path.join(args.output_dir, "test_preds.csv"),
              header=False,
              index=False)
    # HACK for MNLI-mismatched
    if args.mnli:
        mm_test_dataset = torch.load(
            os.path.join(args.data_dir, "mm_test.dataset"))
        logits = runner.run_test(mm_test_dataset)
        df = pd.DataFrame(logits)
        df.to_csv(os.path.join(args.output_dir, "mm_test_preds.csv"),
                  header=False,
                  index=False)
Code example #13
File: scan_view.py  Project: kForth/ClooneyScanner
    def __init__(self, event_id, data_file, config_file, fields_file, scan_dirpath, clooney_host):
        QMainWindow.__init__(self)
        uic.loadUi('ui/ScanView.ui', self)

        try:
            self.tba = TBA('GdZrQUIjmwMZ3XVS622b6aVCh8CLbowJkCs5BmjJl2vxNuWivLz3Sf3PaqULUiZW')
            self.teams = self.tba.get_event_teams_keys(event_id)
            self.matches = self.tba.get_event_matches_simple(event_id)
        except:
            self.tba = None
            self.teams = None
            self.matches = None

        self.data_preview.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)
        self.data_preview.horizontalHeader().setSectionResizeMode(1, QHeaderView.Interactive)
        self.data_preview.horizontalHeader().setSectionResizeMode(2, QHeaderView.Stretch)
        self.data_preview.horizontalHeader().resizeSection(0, 160)
        self.data_preview.horizontalHeader().resizeSection(1, 160)

        self.scan_preview.setScaledContents(True)
        self.scan_preview.mousePressEvent = self.handle_img_click

        self.click_mode = ""
        self.corners = []

        self.submit_button.clicked.connect(self.submit_scan)
        self.reject_button.clicked.connect(self.reject_scan)
        self.go_back_button.clicked.connect(self.load_last_sheet)

        self.refresh_button.clicked.connect(self.look_for_scan)

        self.four_corners_button.clicked.connect(self.handle_four_corners_button)
        self.rotate_180_button.clicked.connect(self.handle_rotate_180_button)
        self.toggle_view_button.clicked.connect(self.handle_toggle_view_button)

        self.config_file = config_file
        self.fields_file = fields_file

        self.event_id = event_id
        self.data_filepath = data_file
        self.config = json.load(open(self.config_file))
        self.field_list = json.load(open(self.fields_file))
        self.scan_dir = scan_dirpath
        self.clooney_host = clooney_host

        for sub_folder in ["Processed", "Rejected", "Marked", "images"]:
            if not os.path.isdir(self.scan_dir + sub_folder + "/"):
                os.makedirs(self.scan_dir + sub_folder + "/")

        self.fields = dict(zip(map(lambda x: x['id'], self.field_list), self.field_list))

        self.scanner = Scanner(self.field_list, self.config, self.scan_dir + "images/")

        self.generator = SpreadsheetGenerator('db.sqlite', self.tba)
        self.generator_runner = Runner('Generator', self.update_spreadsheet)
        self.last_updated = time.time()
        self.should_update_again = False
        self.generator_runner.run(run_anyway=True)

        self.backup_img = np.zeros((1, 1, 3), np.uint8)
        self.img = np.zeros((1, 1, 3), np.uint8)
        self.raw_img = np.zeros((1, 1, 3), np.uint8)
        self.current_img = np.zeros((1, 1, 3), np.uint8)
        self.selected_img = "img"
        self.filename = ""
        self.data_types = {}
        self.filepath_label_old_text = ""
        self.errors = []

        self.get_new_scan()

        self.show()
Code example #14
File: scan_view.py  Project: kForth/ClooneyScanner
class ScanView(QMainWindow):

    def __init__(self, event_id, data_file, config_file, fields_file, scan_dirpath, clooney_host):
        QMainWindow.__init__(self)
        uic.loadUi('ui/ScanView.ui', self)

        try:
            self.tba = TBA('GdZrQUIjmwMZ3XVS622b6aVCh8CLbowJkCs5BmjJl2vxNuWivLz3Sf3PaqULUiZW')
            self.teams = self.tba.get_event_teams_keys(event_id)
            self.matches = self.tba.get_event_matches_simple(event_id)
        except:
            self.tba = None
            self.teams = None
            self.matches = None

        self.data_preview.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)
        self.data_preview.horizontalHeader().setSectionResizeMode(1, QHeaderView.Interactive)
        self.data_preview.horizontalHeader().setSectionResizeMode(2, QHeaderView.Stretch)
        self.data_preview.horizontalHeader().resizeSection(0, 160)
        self.data_preview.horizontalHeader().resizeSection(1, 160)

        self.scan_preview.setScaledContents(True)
        self.scan_preview.mousePressEvent = self.handle_img_click

        self.click_mode = ""
        self.corners = []

        self.submit_button.clicked.connect(self.submit_scan)
        self.reject_button.clicked.connect(self.reject_scan)
        self.go_back_button.clicked.connect(self.load_last_sheet)

        self.refresh_button.clicked.connect(self.look_for_scan)

        self.four_corners_button.clicked.connect(self.handle_four_corners_button)
        self.rotate_180_button.clicked.connect(self.handle_rotate_180_button)
        self.toggle_view_button.clicked.connect(self.handle_toggle_view_button)

        self.config_file = config_file
        self.fields_file = fields_file

        self.event_id = event_id
        self.data_filepath = data_file
        self.config = json.load(open(self.config_file))
        self.field_list = json.load(open(self.fields_file))
        self.scan_dir = scan_dirpath
        self.clooney_host = clooney_host

        for sub_folder in ["Processed", "Rejected", "Marked", "images"]:
            if not os.path.isdir(self.scan_dir + sub_folder + "/"):
                os.makedirs(self.scan_dir + sub_folder + "/")

        self.fields = dict(zip(map(lambda x: x['id'], self.field_list), self.field_list))

        self.scanner = Scanner(self.field_list, self.config, self.scan_dir + "images/")

        self.generator = SpreadsheetGenerator('db.sqlite', self.tba)
        self.generator_runner = Runner('Generator', self.update_spreadsheet)
        self.last_updated = time.time()
        self.should_update_again = False
        self.generator_runner.run(run_anyway=True)

        self.backup_img = np.zeros((1, 1, 3), np.uint8)
        self.img = np.zeros((1, 1, 3), np.uint8)
        self.raw_img = np.zeros((1, 1, 3), np.uint8)
        self.current_img = np.zeros((1, 1, 3), np.uint8)
        self.selected_img = "img"
        self.filename = ""
        self.data_types = {}
        self.filepath_label_old_text = ""
        self.errors = []

        self.get_new_scan()

        self.show()

    def update_spreadsheet(self, delay=30, run_anyway=False):
        time_delta = time.time() - self.last_updated
        if time_delta > 60 or run_anyway:
            last_update = self.last_updated
            self.last_updated = time.time()
            self.generator.create_spreadsheet_for_event(self.event_id)
            try:
                self.generator.upload_to_google_drive('Clooney.xlsx', 'Clooney {}'.format(self.event_id))
            except:
                print("Couldn't Upload Spreadsheet")
            print("Updated Spreadsheet @ {}".format(self.last_updated))
            time.sleep(max(0, delay - (time.time() - last_update)))
            if self.should_update_again:
                self.should_update_again = False
                self.update_spreadsheet(run_anyway=True)
        else:
            self.should_update_again = True

    def enable_inputs(self, enabled=('submit', 'reject', 'go_back', 'refresh', 'four', 'rotate', 'toggle', 'data')):
        self.submit_button.setEnabled('submit' in enabled)
        self.reject_button.setEnabled('reject' in enabled)
        self.go_back_button.setEnabled('go_back' in enabled)
        self.refresh_button.setEnabled('refresh' in enabled)
        self.four_corners_button.setEnabled('four' in enabled)
        self.rotate_180_button.setEnabled('rotate' in enabled)
        self.toggle_view_button.setEnabled('toggle' in enabled)
        self.data_preview.setEnabled('data' in enabled)

    def handle_img_click(self, event):
        if self.click_mode == "four_corners":
            img_h, img_w = self.raw_img.shape[:-1]
            w_scale = img_w / self.scan_preview.size().width()
            h_scale = img_h / self.scan_preview.size().height()
            x = int(event.x() * w_scale)
            y = int(event.y() * h_scale)
            self.corners.append((x, y))
            cv2.rectangle(self.current_img, (x - 10, y - 10), (x+10, y+10), (0, 255, 0), thickness=10)
            self.set_img(self.current_img)
            if len(self.corners) == 4:
                selected_points = sorted(self.corners, key=lambda l: sum(l))
                new_points = ((200, 200), (img_w - 200, 200), (200, img_h - 200), (img_w - 200, img_h - 200))
                new_points = sorted(new_points, key=lambda e: sum(e))
                warp_matrix = cv2.getPerspectiveTransform(np.float32(selected_points), np.float32(new_points))
                self.raw_img = cv2.warpPerspective(self.raw_img, warp_matrix, (img_w, img_h), borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))
                self.reset_click_mode()
                self.get_new_scan(self.raw_img)

    def handle_toggle_view_button(self):
        if self.selected_img == 'img':
            self.selected_img = 'raw'
            self.set_img(self.raw_img)
            self.current_img = np.copy(self.raw_img)
        else:
            self.selected_img = 'img'
            self.set_img(self.img)
            self.current_img = np.copy(self.img)

    def set_filepath_label_text(self, text):
        self.filepath_label_old_text = self.filepath_label.text()
        self.filepath_label.setText(text)

    def handle_four_corners_button(self):
        if self.click_mode == "four_corners":
            self.reset_click_mode()
        else:
            self.set_filepath_label_text('Click on the 4 corners of the bounding box.')
            self.selected_img = 'raw'
            self.set_img(self.raw_img)
            self.enable_inputs('four')
            self.corners = []
            self.scan_preview.setCursor(Qt.PointingHandCursor)
            self.click_mode = "four_corners"

    def handle_rotate_180_button(self):
        img_h, img_w = self.img.shape[:-1]
        warp_matrix = cv2.getRotationMatrix2D((img_w / 2, img_h / 2), 180, 1)
        img = self.img.copy()
        img = cv2.warpAffine(img, warp_matrix, (img_w, img_h), cv2.INTER_LINEAR)
        self.img = img
        self.set_img(img)
        self.reset_click_mode()

    def reset_click_mode(self):
        self.scan_preview.setCursor(Qt.ArrowCursor)
        self.set_filepath_label_text(self.filepath_label_old_text)
        self.enable_inputs()
        self.click_mode = ""
        self.corners = []
        self.set_img(self.img)
        self.selected_img = 'img'

    def check_data(self, data):
        errors = []
        keys_to_check = ['team_number', 'match', 'pos']
        for key in keys_to_check:
            if not (data[key] or data[key] in [False, 0]):
                errors.append('missing_' + key)
        if self.teams:
            if "frc{}".format(data['team_number']) not in self.teams:
                errors.append('team_not_at_event')
        if self.matches:
            if data['match'] not in [e['match_number'] for e in self.matches]:
                errors.append('match_number_not_at_event')
            alliance = 'red' if data['pos'] <= 2 else 'blue'
            expected_team = [e for e in self.matches if e['match_number'] == data['match'] and e['comp_level'] == 'qm'][0]['alliances'][alliance]['team_keys'][data['pos'] % 3]
            if "frc{}".format(data['team_number']) != expected_team:
                errors.append('expected_different_team: {}'.format(expected_team))
        return errors

    def submit_scan(self):
        if self.img is None:
            return
        self.enable_inputs([])

        edited_data = {}
        for r in range(self.data_preview.model().rowCount()):
            key = self.data_preview.model().index(r, 0).data()
            if key in self.fields.keys() and self.fields[key]['type'] in ['HorizontalOptions', 'Boolean']:
                value = self.data_preview.cellWidget(r, 2).currentText()
            elif key in ['pos']:
                value = self.data_preview.cellWidget(r, 2).currentText()
                if value not in self.scanner.POSITIONS:
                    self.enable_inputs()
                    return
                value = self.scanner.POSITIONS.index(value)
            else:
                value = self.data_preview.model().index(r, 2).data()
            data_type = self.data_types[key]
            data_type_name = data_type.__name__
            edited_data[key] = eval('{0}("{1}")'.format(data_type_name, value), {"__builtins__": {data_type_name: data_type}})
            edited_data["filename"] = self.filename

        data_errors = self.check_data(edited_data)
        if data_errors:
            self.errors = data_errors
            self.set_filepath_label_text(json.dumps(self.errors))
            self.enable_inputs()
            return
        try:
            data = json.load(open(self.data_filepath))
        except:
            data = []

        data.append(edited_data)
        json.dump(data, open(self.data_filepath, "w+"))

        data = {
            'filename': self.filename,
            'data': edited_data,
            'team': int(edited_data["team_number"]),
            'match': int(edited_data["match"]),
            'pos': int(edited_data["pos"]),
            'event': self.event_id
        }

        def post_func():
            try:
                requests.post('http://' + self.clooney_host + '/api/sql/add_entry', json=data)
            except Exception as ex:
                print(ex)
        Runner(target=post_func).run()
        self.generator_runner.run()

        shutil.move(self.scan_dir.strip('\\') + self.filename, self.scan_dir + "Processed/" + self.filename)
        cv2.imwrite(self.scan_dir + "Marked/" + self.filename, self.img)
        self.get_new_scan()
        self.enable_inputs()

    def reject_scan(self):
        if self.img is None:
            return
        shutil.move(self.scan_dir + self.filename, self.scan_dir + "Rejected/" + self.filename)
        self.generator_runner.run()
        self.get_new_scan()

    def set_img(self, cv_img):
        self.current_img = np.copy(cv_img)
        height, width, channels = cv_img.shape
        cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        q_image = QImage(cv_img.data, width, height, width * 3, QImage.Format_RGB888)
        self.scan_preview.setPixmap(QPixmap.fromImage(q_image))

    def set_data(self, data):
        self.data_types = dict(zip(data.keys(), map(type, data.values())))
        self.data_preview.setRowCount(len(data))

        for row in range(len(data)):
            key = list(data.keys())[row]
            key_item = QTableWidgetItem(key)
            key_item.setFlags(key_item.flags() & Qt.ItemIsEditable)
            if key in ['match', 'pos']:
                label_item = QTableWidgetItem(key.title())
            else:
                label_item = QTableWidgetItem(self.fields[key]['options']['label'])
            label_item.setFlags(label_item.flags() & Qt.ItemIsEditable)
            self.data_preview.setItem(row, 0, key_item)
            self.data_preview.setItem(row, 1, label_item)
            if key in self.fields.keys() and self.fields[key]['type'] in ['HorizontalOptions', 'Boolean']:
                c = QComboBox()
                if self.fields[key]['type'] == 'Boolean':
                    options = [1, 0]
                else:
                    options = self.fields[key]['options']['options'] + ['']
                c.addItems(map(str, options))
                c.setCurrentIndex(options.index(data[key]))
                self.data_preview.setCellWidget(row, 2, c)
            elif key in ['pos']:
                c = QComboBox()
                c.addItems(self.scanner.POSITIONS)
                if data[key] >= len(self.scanner.POSITIONS):
                    c.setCurrentIndex(0)
                c.setCurrentIndex(data[key])
                self.data_preview.setCellWidget(row, 2, c)
            else:
                self.data_preview.setItem(row, 2, QTableWidgetItem(str(data[key])))

    def look_for_scan(self):
        self.config = json.load(open(self.config_file))
        self.field_list = json.load(open(self.fields_file))
        self.scanner.set_config(self.config)
        self.scanner.set_fields(self.field_list)
        self.enable_inputs([])
        self.update()
        self.get_new_scan()
        self.enable_inputs()

    def load_last_sheet(self):
        try:
            data = json.load(open(self.data_filepath))
        except:
            data = []
        if data:
            info = data[-1]
            self.filename = info['filename']
            del info['filename']
            shutil.move(self.scan_dir + "Processed/" + self.filename, self.scan_dir + self.filename)
            self.set_data(info)
            self.set_img(cv2.imread(self.scan_dir + "Marked/" + self.filename))
            self.set_filepath_label_text(self.filename)
            self.enable_inputs()

            data = data[:-1]
            json.dump(data, open(self.data_filepath, "w+"))

    def get_new_scan(self, raw_scan=None):
        self.enable_inputs([])
        if raw_scan is None:
            try:
                files = glob.glob(self.scan_dir + "*jpg") + glob.glob(self.scan_dir + "*.png")
                selected_file = files[0]
                self.filename = selected_file.split("/")[-1]
                self.set_filepath_label_text(files[0])
                raw_scan = cv2.imread(selected_file)
            except Exception as ex:
                print("Failed to read img")
                self.filepath_label.setText(str(ex))
                self.set_img(np.zeros((1, 1, 3), np.uint8))
                self.set_data({})
                self.refresh_button.setEnabled(True)
                return

        self.raw_img = np.copy(raw_scan)
        data, marked_sheet = self.scanner.scan_sheet(raw_scan)

        self.img = marked_sheet
        self.set_img(self.img)
        self.set_data(data)

        self.enable_inputs()
Code example #15
    def new_runner(self, game):
        self.runners.append(Runner(game))
Code example #16
def test_compiler(tmpdir, repo_manager, language_manager):
    tmpdir = Path(str(tmpdir))
    repo = repo_manager.repo

    lang_cpp = language_manager.get_lang('cpp.g++14')
    lang_py = language_manager.get_lang('py.py3')

    src_dir = tmpdir / 'src'
    src_dir.mkdir()

    for fname in ['code.cpp', 'compile_error.cpp', 'code_libs.cpp', 'code.py']:
        shutil.copy(fspath(tests_location() / fname), fspath(src_dir / fname))

    src_cpp1 = src_dir / 'code.cpp'
    src_cpp2 = src_dir / 'compile_error.cpp'
    src_cpp3 = src_dir / 'code_libs.cpp'
    src_py1 = src_dir / 'code.py'
    src_bad1 = src_dir / 'bad_code.py'
    src_bad2 = src_dir / 'bad_code2.py'

    exe_cpp1 = src_dir / ('1-code' + default_exe_ext())
    exe_py1 = src_dir / ('1-code' + default_exe_ext())

    with pytest.raises(CompileError):
        compiler = Compiler(repo, lang_cpp, src_cpp2)
        compiler.compile()
    with pytest.raises(CompileError):
        compiler = Compiler(repo, lang_cpp, src_py1)
        compiler.compile()
    with pytest.raises(CompileError):
        compiler = Compiler(repo, lang_py, src_bad1, src_bad2)
        compiler.compile()

    runner = Runner()
    runner.capture_stdout = True
    runner.capture_stderr = True

    compiler = Compiler(repo, lang_cpp, src_cpp1)
    compiler.compile()
    runner.parameters.executable = compiler.exe_file
    runner.run()
    assert runner.results.status == Status.OK
    assert runner.stdout == 'hello world\n'

    compiler = Compiler(repo,
                        lang_cpp,
                        src_cpp3,
                        library_dirs=[tests_location()])
    compiler.compile()
    runner.parameters.executable = compiler.exe_file
    runner.run()
    assert runner.results.status == Status.OK
    assert runner.stdout == 'hello world\n'

    compiler = Compiler(repo, lang_py, src_py1)
    compiler.compile()
    runner.parameters.executable = lang_py.run_args(compiler.exe_file)[0]
    runner.parameters.args = lang_py.run_args(compiler.exe_file)[1:]
    runner.run()
    assert runner.results.status == Status.OK
    assert runner.stdout == 'hello world\n'

    compiler = Compiler(repo, lang_cpp, src_cpp1, exe_cpp1)
    compiler.compile()
    runner.parameters.executable = exe_cpp1
    runner.run()
    assert runner.results.status == Status.OK
    assert runner.stdout == 'hello world\n'

    compiler = Compiler(repo, lang_py, src_py1, exe_py1)
    compiler.compile()
    runner.parameters.executable = lang_py.run_args(exe_py1)[0]
    runner.parameters.args = lang_py.run_args(exe_py1)[1:]
    runner.run()
    assert runner.results.status == Status.OK
    assert runner.stdout == 'hello world\n'
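The test repeats the same pattern several times: compile, point runner.parameters.executable at the produced binary, run, then check results.status and stdout. A small helper could factor that out; this is a sketch under the assumption that Compiler, Runner, and Status behave exactly as in the snippet above, and it covers only the natively compiled cases (the Python cases still need lang_py.run_args).

# Sketch: helper reproducing the compile-and-run checks used repeatedly above.
def compile_and_check(repo, lang, src, expected_output='hello world\n'):
    compiler = Compiler(repo, lang, src)
    compiler.compile()
    runner = Runner()
    runner.capture_stdout = True
    runner.parameters.executable = compiler.exe_file
    runner.run()
    assert runner.results.status == Status.OK
    assert runner.stdout == expected_output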