Example #1
def export_xmls_from_db():
    """
    export all jobs from the db to the db/xml_export directory
    """
    now_iso = datetime.now().isoformat()
    user = os.environ["USERNAME"]
    domain = os.environ["USERDOMAIN"]
    all_db_jobs = rms_db.all()

    print(now_iso, user, domain)

    for job in all_db_jobs:
        job_name = job["<project_code>"] + "_" + job["<command>"]
        start_time = now_iso.split("T")[0] + "T" + job[">start_time"]
        cmd_str = " ".join(serdes(job=job)[1:])
        # print(cmd_str)
        xml_prms = {r"\{user_id\}": f"{domain}\\\\{user}",
                    r"\{author\}": f"{domain}\\\\{user}",
                    r"\{description\}": f"created: {now_iso}",
                    r"\{creation_date\}": now_iso,
                    r"\{start_time\}": start_time,
                    r"\{application\}": sys.executable.replace("\\", "\\\\"),
                    r"\{args\}": cmd_str.replace("\\", "\\\\"),
                    }
        with open(rms_paths.xml_exp / "rms_xml_daily_template.xml", "r", encoding="utf-16le") as xml_template:
            xml_content = xml_template.read()

        for param in xml_prms:
            re_prm = re.compile(param)
            xml_content = re.sub(re_prm, xml_prms[param], xml_content)

        with open(rms_paths.xml_exp / f"{job_name}.xml", "w", encoding="utf-16le") as rms_export:
            rms_export.write(xml_content)

        print(colorful.bold_green(f"exported: {job_name}"))
Example #2
def test_step_match(sentence, expected_step, expected_arguments, steps):
    sys.stdout.write('{0} STEP "{1}" SHOULD MATCH {2}    '.format(
        colorful.yellow('>>'), colorful.cyan(sentence),
        colorful.cyan(expected_step)))

    result = match_step(sentence, steps)
    if not result:
        output_failure(
            None, ['Expected sentence didn\'t match any step implementation'])
        return False

    if expected_step != result.func.__name__:
        output_failure(result.func, [
            'Expected sentence matched {0} instead of {1}'.format(
                result.func.__name__, expected_step)
        ])
        return False

    if expected_arguments:
        arguments = merge_step_args(result)
        expected_arguments = {
            k: v
            for expected_arguments in expected_arguments
            for k, v in expected_arguments.items()
        }
        argument_errors = check_step_arguments(expected_arguments, arguments)
        if argument_errors:
            output_failure(result.func, argument_errors)
            return False

    print(u(colorful.bold_green(u'✔')))
    return True
Example #3
def update_graphs(project_code, html_path):
    pd.set_option('display.width', 1800)
    html_path = op.join(html_path, "{0}.html".format(project_code))

    qc_path = op.dirname(op.abspath(__file__))
    commands_dir = op.dirname(qc_path)
    root_dir = op.dirname(commands_dir)
    log_dir = op.join(root_dir, "logs")

    csv_path = op.join(log_dir, project_code + ".csv")

    csv = pd.read_csv(csv_path, delimiter=";")
    csv.timeStamp = pd.to_datetime(csv.timeStamp)

    output_file(html_path, mode="inline")

    topics = {
        "q_": "QC",
        "l_": "LINKS",
        "g_": "GROUPS",
        "v_": "VIEWS",
        "d_": "2D",
        "s_": "STYLES",
        "e_": "ELEMENTS",
        "m_": "PROJECT_SQM",
    }

    graphs = graph(csv, project_code, topics)

    save(column(graphs), validate=False)
    print(colorful.bold_green(f" {html_path} updated successfully."))
Example #4
def check_extensions():
    newline()

    subheader("Verifying extension programs...")
    errors = []

    programs = (
        (
            "jpegtran",
            "Thumbor uses jpegtran for optimizing JPEG images. "
            "For more information visit https://linux.die.net/man/1/jpegtran.",
        ),
        (
            "ffmpeg",
            "Thumbor uses ffmpeg for rendering animated images as GIFV. "
            "For more information visit https://www.ffmpeg.org/.",
        ),
        (
            "gifsicle",
            "Thumbor uses gifsicle for better processing of GIF images. "
            "For more information visit https://www.lcdf.org/gifsicle/.",
        ),
    )

    for program, error_message in programs:
        path = which(program)
        if path is None:
            print(cf.bold_red("❎ %s is not installed." % program))
            print(error_message)
            newline()
            errors.append(error_message)
        else:
            print(cf.bold_green("✅ %s is installed correctly." % program))

    return errors
Example #5
def check_modules():
    newline()
    subheader("Verifying libraries support...")
    errors = []

    modules = (
        (
            "pycurl",
            "Thumbor works much better with PyCurl. For more information visit http://pycurl.io/.",
        ),
        (
            "cv2",
            "Thumbor requires OpenCV for smart cropping. For more information check https://opencv.org/.",
        ),
        (
            "pyexiv2",
            "Thumbor uses exiv2 for reading image metadata. For more information check https://python3-exiv2.readthedocs.io/en/latest/.",
        ),
        (
            "cairosvg",
            "Thumbor uses CairoSVG for reading SVG files. For more information check https://cairosvg.org/.",
        ),
    )

    for module, error_message in modules:
        try:
            import_module(module)  # NOQA
            print(cf.bold_green("✅ %s is installed correctly." % module))
        except ImportError as error:
            print(cf.bold_red("❎ %s is not installed." % module))
            print(error_message)
            newline()
            errors.append("%s - %s" % (str(error), error_message))

    return errors
Example #6
def test_step_not_match(sentence, expected_not_matching_step, steps):
    step_to_print = (
        colorful.cyan(expected_not_matching_step)
        if expected_not_matching_step
        else "ANY"
    )
    sys.stdout.write(
        '{0} STEP "{1}" SHOULD NOT MATCH {2}    '.format(
            colorful.yellow(">>"), colorful.cyan(sentence), step_to_print
        )
    )

    result = match_step(sentence, steps)
    if result:
        if (
            not expected_not_matching_step
            or result.func.__name__ == expected_not_matching_step
        ):
            output_failure(
                None,
                [
                    "Expected sentence did match {0} but it shouldn't".format(
                        expected_not_matching_step
                    )
                ],
            )
            return False

    print(u(colorful.bold_green("✔")))
    return True
Example #7
def check_model_path(job_id):
    job_id = int(job_id)
    job = rms_db.get(doc_id=job_id)
    model_path = pathlib.Path(job["<full_model_path>"])
    if model_path.exists():
        print(colorful.bold_green(f"  model found at: {model_path}"))
        return True
    else:
        print(colorful.bold_red(f"  could not find model at: {model_path}"))
        return False
Example #8
def test_step_matches(match_config, steps):
    """
    Test if the given match config matches the actual
    matched step implementations.
    """
    failed = 0
    passed = 0

    for item in match_config:
        validate_config_item(item)

        sentence = item['sentence']
        expected_step = item['should_match']

        sys.stdout.write('{0} STEP "{1}" SHOULD MATCH {2}    '.format(
            colorful.yellow('>>'), colorful.cyan(sentence),
            colorful.cyan(expected_step)))

        result = match_step(item['sentence'], steps)
        if not result:
            output_failure(
                None,
                ['Expected sentence didn\'t match any step implementation'])
            failed += 1
            continue

        if expected_step != result.func.__name__:
            output_failure(result.func, [
                'Expected sentence matched {0} instead of {1}'.format(
                    result.func.__name__, expected_step)
            ])
            failed += 1
            continue

        expected_arguments = item.get('with_arguments')

        if expected_arguments:
            arguments = merge_step_args(result)
            expected_arguments = {
                k: v
                for expected_arguments in expected_arguments
                for k, v in expected_arguments.items()
            }
            argument_errors = check_step_arguments(expected_arguments,
                                                   arguments)
            if argument_errors:
                output_failure(result.func, argument_errors)
                failed += 1
                continue

        # check if arguments match
        print(u(colorful.bold_green(u'✔')))
        passed += 1

    return failed, passed
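For orientation, a single match-config item as consumed by this function might look like the sketch below (keys inferred from the code above: sentence, should_match, and the optional with_arguments; the exact YAML schema of the match-config file is not shown here):

    item = {
        "sentence": "Given I have the number 5",   # sentence to match against the steps
        "should_match": "step_given_number",       # expected step implementation name
        "with_arguments": [{"number": 5}],         # optional list of expected argument dicts
    }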
Example #9
def log(msg: str, tp: int = 1, name: str = "NNG One", force: bool = False):
    self = Logger
    try:
        debug = NNGRawSaves().debug
    except Exception:
        debug = self.level.none
    if tp == self.level.success:
        print(cf.bold_green(f"[{name}] {msg}"))
    elif tp == self.level.warn:
        print(cf.bold_blue(f"[{name}] {msg}"))
    elif tp == self.level.tagged_success:
        print(cf.bold_green(f"[{name}] {Globals.tag} | {msg}"))
    elif (tp == self.level.error
          and debug >= self.config_logging_level.log_errors) or force:
        print(cf.bold_red(f"[{name}] [ERROR] {msg}"))
    elif (tp == self.level.debug
          and debug >= self.config_logging_level.log_debug) or force:
        print(cf.bold_purple(f"[{name}] [DEBUG] {msg}"))
    elif tp == self.level.hell and debug >= self.config_logging_level.log_hell:
        print(cf.bold_grey(f"{name} [HELL] {msg}"))
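A hypothetical call of this logger, using the level names the branches above reference (the numeric values behind Logger.level are not part of this snippet):

    log("group processed", tp=Logger.level.success, name="NNG One")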
Example #10
    def do_snapshot(self, line):
        """ Load/Save a snapshot """
        l = line.split()
        usage = "snapshot load|save ini|file"
        if len(l) != 3:
            print(usage)
            return

        if l[0] == "load":
            if l[1] == "ini":
                bin_type = armcpu_const.INI_BIN
            elif l[1] == "file":
                bin_type = armcpu_const.SNAPSHOT_BIN
            else:
                print(usage)
                return
        else:
            print(usage)
            return

        self.arm_dbg = armcpu.ArmCPU(l[2], bin_type)
        print(colorful.bold_green("Loaded snapshot: {}".format(l[2])))
Example #11
def current_lesson(lesson):
    print("---")
    print(colorful.bold_green("Weekday: ") + days[weekday])
    print(colorful.bold_green("Current time: ") + str(now.strftime("%H:%M")))
    print("---")
    print(colorful.bold_green("Lesson: ") + lesson["name"])
    print(colorful.bold_green("Teacher: ") + lesson["teacher"])
    print(colorful.bold_green("Room: ") + lesson["room"])
    print(
        colorful.bold_green("Starttime: ") + str(lesson["starttimehour"]) +
        ":" + str(lesson["starttimeminute"]))
    print(
        colorful.bold_green("Endtime: ") + str(lesson["endtimehour"]) + ":" +
        str(lesson["endtimeminute"]))
    print(colorful.bold_green("Time till break: ") + str(endtime - now))
    print("---")
Example #12
def check_filters():
    newline()
    subheader("Verifying thumbor filters...")
    errors = []

    for filter_name in BUILTIN_FILTERS:
        try:
            import_module(filter_name)
            print(cf.bold_green("✅ %s" % filter_name))
        except ImportError as error:
            print(cf.bold_red("❎ %s" % filter_name))
            errors.append(error)

    return errors
Example #13
def check_compiled_extensions():
    newline()
    subheader("Verifying thumbor compiled extensions...")
    errors = []

    for extension in BUILTIN_EXTENSIONS:
        ext_name = extension.replace("thumbor.ext.filters.", "")
        try:
            import_module(extension)
            print(cf.bold_green("✅ %s" % ext_name))
        except ImportError as error:
            print(cf.bold_red("❎ %s" % ext_name))
            errors.append(error)

    return errors
Example #14
    def run(self):
        self.initialize_params()
        self.save_model(0)

        while self.epoch <= self.n_epochs:
            self.train_epoch()
            self.test()
            self.epoch += 1

        print(
            colorful.bold_green(
                f'\n====> Best Epoch: {self.best_epoch}').styled_string)
        best_checkpoint_path = os.path.join(self.save_dir,
                                            str(self.best_epoch) + '.pkl')
        self.importance_sample(best_checkpoint_path)
        self.writer.close()
Example #15
def captcha_handler(self, captcha):
    webbrowser.open_new(captcha.get_url())
    # prompt translates to: "Enter the captcha code"
    key = input(
        cf.bold_green(
            f"[NNG Framework] [Captcha Handler] Введите код каптчи: {captcha.get_url()}: "
        )
    )
    try:
        return captcha.try_again(key)
    except vk_api.exceptions.Captcha as capt:
        # message translates to: "Wrong captcha code! Try again."
        lg.log(
            "[Captcha Handler] Неверный код каптчи! Попробуйте снова.",
            3,
            self.name,
        )
        return self.captcha_handler(capt)
Example #16
def main():
    os.environ['PYTHONUNBUFFERED'] = '1'
    process = subprocess.Popen(['python3', 'infiloop.py'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               universal_newlines=True,
                               bufsize=1)
    while process.poll() is None:
        line = process.stdout.readline()
        if line:
            if '-=>' in line:  # Assume sophisticated pattern
                print(colorful.bold_green(line.strip()))
            else:
                print(line.strip())
        line = process.stderr.readline()
        if line:
            print(colorful.red(line.strip()))
Example #17
def import_xmls_into_db():
    """
    import all xml rms tasks from db/xml_import directory into db
    """
    found_rms_task_xml = False
    for entry in os.scandir(rms_paths.xml_imp):
        if not entry.is_file():
            continue
        if not entry.name.endswith(".xml"):
            continue
        with open(rms_paths.xml_imp / entry.name, "r",
                  encoding="utf-16le") as xml_task:
            xml_content = xml_task.read()
        re_process_model = re.compile(r"process_model\.py")
        is_rms_task = re.findall(re_process_model, xml_content)
        if is_rms_task:
            print(colorful.bold_green(f"  processing xml: {entry.name}"))
            found_rms_task_xml = True
            cmd_tokens = {"args": {}, "opts": []}
            re_args = re.compile("Arguments>(.+)</Arguments")
            re_opts = re.compile("(--.+)")
            re_start = re.compile("StartBoundary>.+T(.+)</StartBoundary")
            arguments = re.findall(re_args, xml_content)
            options = re.findall(re_opts, arguments[0])[0]
            cmd_args = arguments[0].split("--")[0].split()
            cmd_tokens["args"]["<command>"] = cmd_args[1]
            cmd_tokens["args"]['<project_code>'] = cmd_args[2]
            cmd_tokens["args"]["<full_model_path>"] = cmd_args[3]
            cmd_tokens["args"][">start_time"] = re.findall(
                re_start, xml_content)[0]
            cmd_tokens["opts"] = [
                "--" + tok.strip() for tok in options.split("--") if tok
            ]
            # print(f"  found {cmd_tokens}")
            db_job_dict = serdes(
                cmd_tokens=cmd_tokens)  # {"args": args, "opts": options})
            pprint(db_job_dict)
            rms_db.upsert(
                db_job_dict, (Query()["<project_code>"]
                              == cmd_tokens["args"]['<project_code>']) &
                (Query()["<command>"] == cmd_tokens["args"]["<command>"]))
            print("  added/updated in db.")
    if not found_rms_task_xml:
        print(
            colorful.bold_red(
                f"  could not find rms task xml in: {rms_paths.db}"))
Example #18
def test_problem(problem, log=True):
    if log: print(f"👷‍ Testing {problem}...")

    lang = language_detector.determine_language(problem)
    lang_config = language_detector.get_config(lang)
    if log:
        print(f"👷‍ Language = {lang_config.kattis_name} {lang_config.emoji}\n")

    inputs = glob.glob(f"./{problem}/*.in")

    count = 0
    failed = False
    for input in inputs:
        count += 1
        if log:
            print(f"🔎 Test number {count}:")

        input_file = open(input, "rb")
        input_content = input_file.read()

        program_path = f"./{problem}/solution.{lang_config.file_extension}"
        output_string = lang_config.run_program(program_path, input_content)

        answer = input.replace('.in', '.ans')
        answer_file = open(answer, "r")
        answer_content = answer_file.read()

        if output_string.replace("\r\n", "\n") != answer_content.replace(
                "\r\n", "\n"):
            if log:
                print(cf.bold_red("❌ Failed..."))
                print("__________INPUT____________")
                print(input_content.decode('utf-8'))
                print("__________INPUT____________")
                print(cf.red("__________OUTPUT___________"))
                print(cf.red(output_string))
                print(cf.red("__________OUTPUT___________"))
                print("__________EXPECTED_________")
                print(answer_content)
                print("__________EXPECTED_________")

            failed = True
        elif log:
            print(cf.bold_green("✅ Test succesful!\n"))

    return not failed
Example #19
def test_step_not_match(sentence, expected_not_matching_step, steps):
    step_to_print = colorful.cyan(
        expected_not_matching_step) if expected_not_matching_step else 'ANY'
    sys.stdout.write('{0} STEP "{1}" SHOULD NOT MATCH {2}    '.format(
        colorful.yellow('>>'), colorful.cyan(sentence), step_to_print))

    result = match_step(sentence, steps)
    if result:
        if not expected_not_matching_step or result.func.__name__ == expected_not_matching_step:
            output_failure(None, [
                'Expected sentence did match {0} but it shouldn\'t'.format(
                    expected_not_matching_step)
            ])
            return False

    print(u(colorful.bold_green(u'✔')))
    return True
Example #20
def main():
    """Converts a given url with the specified arguments."""

    options = get_options()

    cf.use_style("solarized")
    if options["nocolor"]:
        cf.disable()

    newline()
    header("Thumbor v%s (of %s)" % (__version__, __release_date__))

    newline()
    print(
        "Thumbor doctor will analyze your install and verify if everything is working as expected."
    )

    errors = check_modules()
    errors += check_compiled_extensions()
    errors += check_filters()
    errors += check_extensions()

    newline()

    if errors:
        print(
            cf.bold_red(
                "😞 Oh no! We found some things that could improve... 😞"))
        newline()
        print("\n".join(["* %s" % str(err) for err in errors]))
        newline()
        newline()
        print(
            cf.cyan(
                "If you don't know how to fix them, please open an issue with thumbor."
            ))
        print(
            cf.cyan(
                "Don't forget to copy this log and add it to the description of your issue."
            ))
        print("Open an issue at https://github.com/thumbor/thumbor/issues/new")
        sys.exit(1)
        return

    print(cf.bold_green("🎉 Congratulations! No errors found! 🎉"))
Example #21
def table_show_registers(regs_list):
    """Show registers in a table."""
    table_data = [[r for r in regs_list[:4]], [r for r in regs_list[4:8]],
                  [r for r in regs_list[8:12]], [r for r in regs_list[12:16]]]

    table_title = colorful.bold_green("ARM Registers")
    table_instance = SingleTable(table_data, table_title)
    table_instance.inner_heading_row_border = False
    table_instance.inner_row_border = True
    table_instance.justify_columns = {
        0: 'center',
        1: 'center',
        2: 'center',
        3: 'center'
    }

    print ""
    print table_instance.table
    print ""
Example #22
def test_step_not_match(sentence, expected_not_matching_step, steps):
    step_to_print = (colorful.cyan(expected_not_matching_step)
                     if expected_not_matching_step else "ANY")
    sys.stdout.write('{0} STEP "{1}" SHOULD NOT MATCH {2}    '.format(
        colorful.yellow(">>"), colorful.cyan(sentence), step_to_print))

    result = match_step(sentence, steps)
    if result:
        if (not expected_not_matching_step
                or result.func.__name__ == expected_not_matching_step):
            output_failure(
                None,
                [
                    "Expected sentence did match {0} but it shouldn't".format(
                        expected_not_matching_step)
                ],
            )
            return False

    print(u(colorful.bold_green("✔")))
    return True
Example #23
    def train_epoch(self):
        # set to train mode
        self.model.train()
        train_loss = 0
        train_loader = self.dataset.train_loader

        epoch_start_time = time.time()

        for batch_idx, (data, _) in enumerate(train_loader):
            data = data.cuda()
            data = self.dataset.preprocess(data)

            if batch_idx == 0 and self.epoch == 1:
                self.model.write_summary(data, self.writer, 0)

            self.optimizer.zero_grad()
            loss = self.model(data)
            loss.backward()

            train_loss += loss.item() * len(data)
            assert not np.isnan(loss.item())

            self.optimizer.step()

            if self.log_interval is not None:
                if batch_idx % self.log_interval == 0:
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.
                          format(self.epoch, batch_idx * len(data),
                                 len(train_loader.dataset),
                                 100. * batch_idx / len(train_loader),
                                 loss.item()))

        duration = time.time() - epoch_start_time
        epoch_loss = train_loss / len(train_loader.dataset)
        print(
            colorful.bold_green(
                '====> Epoch: {} Average loss: {:.4f} Duration(sec): {}'.
                format(self.epoch, epoch_loss, duration)).styled_string)
        self.writer.add_scalar('train/loss', epoch_loss, self.epoch)
        self.model.write_summary(data, self.writer, self.epoch)
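Example #24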
def purge_old(directory: Path, extension: str, threshold_age_days=60):
    """
    deletes all files with specified extension older than the threshold_age_days
    :param directory: path to search
    :param extension: file extension to filter by
    :param threshold_age_days: maximum file age in days since last modification
    :return:
    """
    found = 0
    now = time.time()
    for node in directory.iterdir():
        if node.suffix == f".{extension}":
            file_modified = node.stat().st_mtime
            if (now - file_modified) // (24 * 3600) >= threshold_age_days:
                node.unlink()
                found += 1

    if found > 0:
        print(
            col.bold_green(
                f" cleaned-up {found} {extension} files older than: {threshold_age_days} in {directory}"
            ))
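A hypothetical usage of purge_old with an illustrative directory and extension (neither is taken from the source):

    from pathlib import Path

    # delete all *.log files in ./logs last modified more than 30 days ago
    purge_old(Path("logs"), "log", threshold_age_days=30)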
Example #25
def test_step_match(sentence, expected_step, expected_arguments, steps):
    sys.stdout.write(
        '{0} STEP "{1}" SHOULD MATCH {2}    '.format(
            colorful.yellow(">>"), colorful.cyan(sentence), colorful.cyan(expected_step)
        )
    )

    result = match_step(sentence, steps)
    if not result:
        output_failure(None, ["Expected sentence didn't match any step implementation"])
        return False

    if expected_step != result.func.__name__:
        output_failure(
            result.func,
            [
                "Expected sentence matched {0} instead of {1}".format(
                    result.func.__name__, expected_step
                )
            ],
        )
        return False

    if expected_arguments:
        arguments = merge_step_args(result)
        expected_arguments = {
            k: v
            for expected_arguments in expected_arguments
            for k, v in expected_arguments.items()
        }
        argument_errors = check_step_arguments(expected_arguments, arguments)
        if argument_errors:
            output_failure(result.func, argument_errors)
            return False

    print(u(colorful.bold_green("✔")))
    return True
Example #26
    def importance_sample(self, checkpoint):
        self.model.load_state_dict(torch.load(checkpoint))
        self.model.eval()
        test_loglikelihood = 0
        test_loader = self.dataset.test_loader
        with torch.no_grad():
            for i, (data, _) in enumerate(test_loader):
                data = data.cuda()
                data = self.dataset.preprocess(data)

                if models.n_importance_sample > 1000:
                    for data_ in data:
                        test_loglikelihood += self.model.importance_sample(
                            data_.unsqueeze(0))
                else:
                    test_loglikelihood += self.model.importance_sample(data)

        test_loglikelihood /= len(test_loader.dataset)

        print(
            colorful.bold_green('====> Test set loglikelihood: {:.4f}'.format(
                test_loglikelihood)).styled_string)
        self.writer.add_scalar('test/loglikelihood', test_loglikelihood,
                               self.best_epoch)
Example #27
def get_int_choose(
    self,
    show_string: str,
    maxValue: int,
    minValue: int = 1,
    custom_error: str = None,
):
    value = str(input(cf.bold_green(f"{show_string}: ")))
    if not value.isdigit():
        # message translates to: "The entered string is not a number"
        lg.log(custom_error or "Введёная строка — не число", lg.level.warn)
        return self.get_int_choose(
            show_string, maxValue, minValue=minValue, custom_error=custom_error
        )
    if maxValue < int(value):
        # message translates to: "The maximum for this number is {maxValue}"
        lg.log(custom_error or f"Максимум этого числа — {maxValue}", lg.level.warn)
        return self.get_int_choose(
            show_string, maxValue, minValue=minValue, custom_error=custom_error
        )
    if int(value) < minValue:
        # message translates to: "The minimum for this number is {minValue}"
        lg.log(custom_error or f"Минимум этого числа — {minValue}", lg.level.warn)
        return self.get_int_choose(
            show_string, maxValue, minValue=minValue, custom_error=custom_error
        )
    return int(value)
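A hypothetical call (assuming self is the surrounding framework object; the prompt text is illustrative):

    choice = self.get_int_choose("Choose an action", maxValue=4)  # returns an int between 1 and 4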
Example #28
def test_step_matches_configs(
    match_config_files, basedirs, cover_min_percentage=None, cover_show_missing=False
):
    """
    Test if the given match config files matches the actual
    matched step implementations.
    """
    if cover_min_percentage is not None and float(cover_min_percentage) > 100:
        sys.stderr.write(
            str(
                colorful.magenta(
                    "You are a little cocky to think you can reach a minimum coverage of {0:.2f}%\n".format(
                        float(cover_min_percentage)
                    )
                )
            )
        )
        return 3

    # load user's custom python files
    for basedir in basedirs:
        load_modules(basedir)

    steps = StepRegistry().steps

    if not steps:
        sys.stderr.write(
            str(
                colorful.magenta(
                    "No step implementations found in {0}, thus doesn't make sense to continue".format(
                        basedirs
                    )
                )
            )
        )
        return 4

    failed = 0
    passed = 0
    covered_steps = set()

    for match_config_file in match_config_files:
        # load the given match config file
        with codecs.open(match_config_file, "r", "utf-8") as f:
            match_config = yaml.safe_load(f)

        if not match_config:
            print(
                colorful.magenta(
                    "No sentences found in {0} to test against".format(
                        match_config_file
                    )
                )
            )
            return 5

        print(
            colorful.yellow(
                "Testing sentences from {0}:".format(
                    colorful.bold_yellow(match_config_file)
                )
            )
        )
        failed_sentences, passed_sentences = test_step_matches(match_config, steps)
        failed += failed_sentences
        passed += passed_sentences

        covered_steps = covered_steps.union(
            x["should_match"] for x in match_config if "should_match" in x
        )

        # newline
        sys.stdout.write("\n")

    report = colorful.bold_white("{0} sentences (".format(failed + passed))
    if passed > 0:
        report += colorful.bold_green("{0} passed".format(passed))

    if passed > 0 and failed > 0:
        report += colorful.bold_white(", ")

    if failed > 0:
        report += colorful.bold_red("{0} failed".format(failed))
    report += colorful.bold_white(")")
    print(report)

    step_coverage = 100.0 / len(steps) * len(covered_steps)
    coverage_report = colorful.bold_white(
        "Covered {0} of {1} step implementations".format(len(covered_steps), len(steps))
    )

    ret = 0 if failed == 0 else 1

    if cover_min_percentage:
        coverage_color = (
            colorful.bold_green
            if step_coverage >= float(cover_min_percentage)
            else colorful.bold_red
        )
        coverage_report += colorful.bold_white(" (coverage: ")
        coverage_report += coverage_color("{0:.2f}%".format(step_coverage))
        if float(cover_min_percentage) > step_coverage:
            coverage_report += colorful.bold_white(
                ", expected a minimum of {0}".format(
                    colorful.bold_green(cover_min_percentage + "%")
                )
            )
            if failed == 0:
                ret = 2
            # if tests have passed and coverage is too low we fail with exit code 2
        coverage_report += colorful.bold_white(")")

    print(coverage_report)

    if cover_show_missing:
        missing_steps = get_missing_steps(steps, covered_steps)
        if missing_steps:
            missing_step_report = colorful.bold_yellow("Missing steps:\n")
            for step in missing_steps:
                missing_step_report += "- {0} at ".format(colorful.cyan(step[0]))
                missing_step_report += colorful.cyan(step[1]) + "\n"
            sys.stdout.write(missing_step_report)

    return ret
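Read together, the return codes this function produces are (all taken from the branches above):

    # 0 - all sentences matched and, if required, coverage was met
    # 1 - at least one sentence failed to match
    # 2 - all sentences passed but coverage is below cover_min_percentage
    # 3 - cover_min_percentage was set above 100
    # 4 - no step implementations were found in basedirs
    # 5 - a match config file contained no sentences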
Example #29
def test_step_matches_configs(match_config_files,
                              basedirs,
                              cover_min_percentage=None,
                              cover_show_missing=False):
    """
    Test if the given match config files matches the actual
    matched step implementations.
    """
    if cover_min_percentage is not None and float(cover_min_percentage) > 100:
        sys.stderr.write(
            str(
                colorful.magenta(
                    'You are a little cocky to think you can reach a minimum coverage of {0:.2f}%\n'
                    .format(float(cover_min_percentage)))))
        return 3

    # load user's custom python files
    for basedir in basedirs:
        load_modules(basedir)

    steps = StepRegistry().steps

    if not steps:
        sys.stderr.write(
            str(
                colorful.magenta(
                    'No step implementations found in {0}, thus doesn\'t make sense to continue'
                    .format(basedirs))))
        return 4

    failed = 0
    passed = 0
    covered_steps = set()

    for match_config_file in match_config_files:
        # load the given match config file
        with codecs.open(match_config_file, "r", "utf-8") as f:
            match_config = yaml.safe_load(f)

        if not match_config:
            print(
                colorful.magenta(
                    'No sentences found in {0} to test against'.format(
                        match_config_file)))
            return 5

        print(
            colorful.yellow('Testing sentences from {0}:'.format(
                colorful.bold_yellow(match_config_file))))
        failed_sentences, passed_sentences = test_step_matches(
            match_config, steps)
        failed += failed_sentences
        passed += passed_sentences

        covered_steps = covered_steps.union(x['should_match']
                                            for x in match_config
                                            if 'should_match' in x)

        # newline
        sys.stdout.write('\n')

    report = colorful.bold_white('{0} sentences ('.format(failed + passed))
    if passed > 0:
        report += colorful.bold_green('{0} passed'.format(passed))

    if passed > 0 and failed > 0:
        report += colorful.bold_white(', ')

    if failed > 0:
        report += colorful.bold_red('{0} failed'.format(failed))
    report += colorful.bold_white(')')
    print(report)

    step_coverage = 100.0 / len(steps) * len(covered_steps)
    coverage_report = colorful.bold_white(
        'Covered {0} of {1} step implementations'.format(
            len(covered_steps), len(steps)))

    ret = 0 if failed == 0 else 1

    if cover_min_percentage:
        coverage_color = colorful.bold_green if step_coverage >= float(
            cover_min_percentage) else colorful.bold_red
        coverage_report += colorful.bold_white(' (coverage: ')
        coverage_report += coverage_color('{0:.2f}%'.format(step_coverage))
        if float(cover_min_percentage) > step_coverage:
            coverage_report += colorful.bold_white(
                ', expected a minimum of {0}'.format(
                    colorful.bold_green(cover_min_percentage + '%')))
            if failed == 0:
                ret = 2
            # if tests have passed and coverage is too low we fail with exit code 2
        coverage_report += colorful.bold_white(')')

    print(coverage_report)

    if cover_show_missing:
        missing_steps = get_missing_steps(steps, covered_steps)
        if missing_steps:
            missing_step_report = colorful.bold_yellow('Missing steps:\n')
            for step in missing_steps:
                missing_step_report += '- {0} at '.format(
                    colorful.cyan(step[0]))
                missing_step_report += colorful.cyan(step[1]) + '\n'
            sys.stdout.write(missing_step_report)

    return ret
Example #30
    def console_write(self, features, marker):
        """
            Writes the endreport for all features

            :param list features: all features
        """
        stats = {
            "features": {
                "amount": 0,
                "passed": 0,
                "failed": 0,
                "skipped": 0,
                "untested": 0,
                "pending": 0,
            },
            "scenarios": {
                "amount": 0,
                "passed": 0,
                "failed": 0,
                "skipped": 0,
                "untested": 0,
                "pending": 0,
            },
            "steps": {
                "amount": 0,
                "passed": 0,
                "failed": 0,
                "skipped": 0,
                "untested": 0,
                "pending": 0,
            },
        }
        pending_steps = []
        duration = timedelta()
        for feature in features:
            if not feature.has_to_run(world.config.scenarios):
                continue
            stats["features"]["amount"] += 1
            stats["features"][feature.state] += 1

            if feature.state in [Step.State.PASSED, Step.State.FAILED]:
                duration += feature.duration

            for scenario in feature.all_scenarios:
                if not scenario.has_to_run(world.config.scenarios):
                    continue

                if isinstance(scenario, ScenarioOutline):  # skip ScenarioOutlines
                    continue
                if isinstance(scenario, ScenarioLoop):  # skip ScenarioLoop
                    continue

                stats["scenarios"]["amount"] += 1
                stats["scenarios"][scenario.state] += 1
                for step in scenario.steps:
                    stats["steps"]["amount"] += 1
                    stats["steps"][step.state] += 1

                    if step.state == Step.State.PENDING:
                        pending_steps.append(step)

        colored_closing_paren = colorful.bold_white(")")
        colored_comma = colorful.bold_white(", ")
        passed_word = colorful.bold_green("{0} passed")
        failed_word = colorful.bold_red("{0} failed")
        skipped_word = colorful.cyan("{0} skipped")
        pending_word = colorful.bold_yellow("{0} pending")

        output = colorful.bold_white(
            "{0} features (".format(stats["features"]["amount"])
        )
        output += passed_word.format(stats["features"]["passed"])
        if stats["features"]["failed"]:
            output += colored_comma + failed_word.format(stats["features"]["failed"])
        if stats["features"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["features"]["skipped"])
        if stats["features"]["pending"]:
            output += colored_comma + pending_word.format(stats["features"]["pending"])
        output += colored_closing_paren

        output += "\n"
        output += colorful.bold_white(
            "{} scenarios (".format(stats["scenarios"]["amount"])
        )
        output += passed_word.format(stats["scenarios"]["passed"])
        if stats["scenarios"]["failed"]:
            output += colored_comma + failed_word.format(stats["scenarios"]["failed"])
        if stats["scenarios"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["scenarios"]["skipped"])
        if stats["scenarios"]["pending"]:
            output += colored_comma + pending_word.format(stats["scenarios"]["pending"])
        output += colored_closing_paren

        output += "\n"
        output += colorful.bold_white("{} steps (".format(stats["steps"]["amount"]))
        output += passed_word.format(stats["steps"]["passed"])
        if stats["steps"]["failed"]:
            output += colored_comma + failed_word.format(stats["steps"]["failed"])
        if stats["steps"]["skipped"]:
            output += colored_comma + skipped_word.format(stats["steps"]["skipped"])
        if stats["steps"]["pending"]:
            output += colored_comma + pending_word.format(stats["steps"]["pending"])
        output += colored_closing_paren

        if pending_steps:
            sr = StepRegistry()
            pending_step_implementations = make_unique_obj_list(
                pending_steps, lambda x: x.definition_func
            )
            output += colorful.white(
                "\nYou have {0} pending step implementation{1} affecting {2} step{3}:\n  {4}\n\nNote: this could be the reason for some failing subsequent steps".format(
                    len(pending_step_implementations),
                    "s" if len(pending_step_implementations) is not 1 else "",
                    len(pending_steps),
                    "s" if len(pending_steps) is not 1 else "",
                    "\n  ".join(
                        [
                            "-  '{0}' @ {1}".format(
                                sr.get_pattern(s.definition_func),
                                get_func_code(s.definition_func).co_filename,
                            )
                            for s in pending_step_implementations
                        ]
                    ),
                )
            )

        output += "\n"

        if world.config.wip:
            if stats["scenarios"]["passed"] > 0:
                output += colorful.red(
                    "\nThe --wip switch was used, so I didn't expect anything to pass. These scenarios passed:\n"
                )

                has_passed_scenarios = False
                for feature in features:
                    passed_scenarios = list(
                        filter(
                            lambda s: s.state == Step.State.PASSED,
                            feature.all_scenarios,
                        )
                    )
                    for scenario in passed_scenarios:
                        output += colorful.red(
                            "\n - {}: {}".format(feature.path, scenario.sentence)
                        )
                        has_passed_scenarios = True

                if has_passed_scenarios:
                    output += "\n"
            else:
                output += colorful.green(
                    "\nThe --wip switch was used, so the failures were expected. All is good.\n"
                )

        output += colorful.cyan(
            "Run {0} finished within {1}".format(
                marker, humanize.naturaldelta(duration)
            )
        )

        write(output)
Example #31
def main():
    # Argument passing/parsing
    args, model_args = config_utils.initialize_argparser(
        MODELS, _command_args, custom_argparsers.DialogArgumentParser)
    hparams, hparams_dict = config_utils.create_or_load_hparams(
        args, model_args, args.cfg)
    pprint(hparams_dict)

    # Set environment variables & gpus
    set_logger()
    set_gpus(hparams.gpus)
    set_tcmalloc()
    gpus = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_visible_devices(gpus, 'GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)

    # Set random seed
    tf.random.set_seed(hparams.random_seed)
    np.random.seed(hparams.random_seed)
    random.seed(hparams.random_seed)

    # For multi-gpu
    if hparams.num_gpus > 1:
        mirrored_strategy = tf.distribute.MirroredStrategy()  # NCCL will be used as default
    else:
        mirrored_strategy = None

    # Download BERT pretrained model
    if not os.path.exists(hparams.bert_dir):
        os.makedirs(hparams.bert_dir)
        fname = 'uncased_L-12_H-768_A-12.zip'
        gd_id = '17rfV9CleFBwwfS7m5Yd72vvxdPLWBHl6'
        download_from_google_drive(gd_id, os.path.join(hparams.bert_dir, fname))
        unzip(hparams.bert_dir, fname)

    # Make dataset reader
    os.makedirs(hparams.cache_dir, exist_ok=True)
    if hparams.data_name == "wizard_of_wikipedia":
        reader_cls = WowDatasetReader
    elif hparams.data_name == "holle":
        reader_cls = HolleDatasetReader
    else:
        raise ValueError("data_name must be one of 'wizard_of_wikipedia' and 'holle'")
    reader = reader_cls(
        hparams.batch_size, hparams.num_epochs,
        buffer_size=hparams.buffer_size,
        bucket_width=hparams.bucket_width,
        max_length=hparams.max_length,
        max_episode_length=hparams.max_episode_length,
        max_knowledge=hparams.max_knowledge,
        knowledge_truncate=hparams.knowledge_truncate,
        cache_dir=hparams.cache_dir,
        bert_dir=hparams.bert_dir,
    )
    train_dataset, iters_in_train = reader.read('train', mirrored_strategy)
    test_dataset, iters_in_test = reader.read('test', mirrored_strategy)
    if hparams.data_name == 'wizard_of_wikipedia':
        unseen_dataset, iters_in_unseen = reader.read('test_unseen', mirrored_strategy)
    vocabulary = reader.vocabulary

    # Build model & optimizer & trainer
    if mirrored_strategy:
        with mirrored_strategy.scope():
            model = MODELS[hparams.model](hparams, vocabulary)
            optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
                                                 clipnorm=hparams.clipnorm)
    else:
        model = MODELS[hparams.model](hparams, vocabulary)
        optimizer = tf.keras.optimizers.Adam(learning_rate=hparams.init_lr,
                                             clipnorm=hparams.clipnorm)
    trainer = Trainer(model, optimizer, mirrored_strategy,
                      hparams.enable_function,
                      WowDatasetReader.remove_pad)

    # misc (tensorboard, checkpoints)
    file_writer = tf.summary.create_file_writer(hparams.checkpoint_dir)
    file_writer.set_as_default()
    global_step = tf.compat.v1.train.get_or_create_global_step()
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     model=model,
                                     optimizer_step=global_step)
    checkpoint_manager = tf.train.CheckpointManager(checkpoint,
                                                    directory=hparams.checkpoint_dir,
                                                    max_to_keep=hparams.max_to_keep)
    checkpoint_tracker = CheckpointTracker(
        hparams.checkpoint_dir, max_to_keep=BEST_N_CHECKPOINTS)

    # Main loop!
    train_dataset_iter = iter(train_dataset)
    for epoch in range(hparams.num_epochs):
        print(hparams.checkpoint_dir)
        base_description = f"(Train) Epoch {epoch}, GPU {hparams.gpus}"
        train_tqdm = trange(iters_in_train, ncols=120, desc=base_description)
        for current_step in train_tqdm:
            example = next(train_dataset_iter)
            global_step.assign_add(1)
            _global_step = int(global_step)

            # Train
            output_dict = trainer.train_step(example)

            # Print model
            if _global_step == 1:
                model.print_model()

            loss_str = str(output_dict['loss'].numpy())
            train_tqdm.set_description(f"{base_description}, Loss {loss_str}")
            with file_writer.as_default():
                if _global_step % int(hparams.logging_step) == 0:
                    tf.summary.histogram('train/vocab', output_dict['sample_ids'], step=_global_step)
                    tf.summary.scalar('train/loss', output_dict['loss'], step=_global_step)
                    tf.summary.scalar('train/gen_loss', output_dict['gen_loss'], step=_global_step)
                    tf.summary.scalar('train/knowledge_loss', output_dict['knowledge_loss'], step=_global_step)
                    tf.summary.scalar('train/kl_loss', output_dict['kl_loss'], step=_global_step)

            # Test
            if _global_step % int(iters_in_train * hparams.evaluation_epoch) == 0:
                checkpoint_manager.save(global_step)

                test_loop_outputs = trainer.test_loop(test_dataset, iters_in_test, epoch, 'seen')
                if hparams.data_name == 'wizard_of_wikipedia':
                    unseen_loop_outputs = trainer.test_loop(unseen_dataset, iters_in_unseen, epoch, 'unseen')

                test_summaries, log_dict = run_wow_evaluation(
                    test_loop_outputs, hparams.checkpoint_dir, 'seen')
                if hparams.data_name == 'wizard_of_wikipedia':
                    unseen_summaries, unseen_log_dict = run_wow_evaluation(
                        unseen_loop_outputs, hparams.checkpoint_dir, 'unseen')

                # Logging
                tqdm.write(colorful.bold_green("seen").styled_string)
                tqdm.write(colorful.bold_red(pformat(log_dict)).styled_string)
                if hparams.data_name == 'wizard_of_wikipedia':
                    tqdm.write(colorful.bold_green("unseen").styled_string)
                    tqdm.write(colorful.bold_red(pformat(unseen_log_dict)).styled_string)

                with file_writer.as_default():
                    for family, test_summary in test_summaries.items():
                        for key, value in test_summary.items():
                            tf.summary.scalar(f'{family}/{key}', value, step=_global_step)
                    if hparams.data_name == 'wizard_of_wikipedia':
                        for family, unseen_summary in unseen_summaries.items():
                            for key, value in unseen_summary.items():
                                tf.summary.scalar(f'{family}/{key}', value, step=_global_step)

                if hparams.keep_best_checkpoint:
                    current_score = log_dict["rouge1"]
                    checkpoint_tracker.update(current_score, _global_step)
Example #32
class ArmjitsuCmd(Cmd):
    """Command dispatch loop"""

    prompt = colorful.bold_green("(armjitsu) ")
    ruler = "-"
    debug = True

    def __init__(self):
        Cmd.__init__(self)

        self.bin_loaded = False
        self.bin_running = False

        self.arm_dbg = None

        if DEBUG_MODE:
            import ipdb; ipdb.set_trace()


    @options([make_option('-l', '--list', action="store_true", help="Show supported binary formats."),
              make_option('-r', '--raw', action="store_true", help="Load ARM RAW/Shellcode from file."),
              make_option('-e', '--elf', action="store_true", help="Load ARM ELF binary from file.")
             ])
    def do_file(self, args, opts=None):
        """
        Load an ARM binary file for emulation and debugging.
        To list ARMjitsu supported binary formats invoke:

        (armjitsu) file --list
        """
        BIN_TYPE = armcpu_const.RAW_BIN
        if opts.raw:
            BIN_TYPE = armcpu_const.RAW_BIN
        elif opts.elf:
            BIN_TYPE = armcpu_const.ELF_BIN

        line = args
        if not line:
            print colorful.yellow("Supply the filename of the binary you wish to load please.")
            return None

        file_name = line if is_file(line) else None
        if not file_name or not BIN_TYPE:
            print colorful.yellow("Error with supplied filename.")
            return False

        self.arm_dbg = armcpu.ArmCPU(file_name, BIN_TYPE)
        self.bin_loaded = True

        print(colorful.base1("Loaded binary file: {}".format(file_name)))

    # Synonyms for do_file
    do_load = do_file

    # REMOVE AFTER DEV
    def do_testing(self, line):
        self.arm_dbg = armcpu.ArmCPU("armraw.bin", armcpu_const.RAW_BIN)
        self.bin_loaded = True
        print(colorful.bold_red("Developer testing mode! armraw.bin loaded!"))

    do_t = do_testing

    def do_run(self, line):
        """Begins execution of ARM binary."""
        if not self.bin_running:
            self.bin_running = True
        else:
            print colorful.yellow("Process is already running.")
            return None

        self.arm_dbg.start_execution()

    do_start = do_run
    do_r = do_run

    def do_continue(self, line):
        """Continue execution from a paused state."""
        self.arm_dbg.continue_execution()

    do_c = do_continue
    do_con = do_continue

    def do_registers(self, line):
        """Display registers."""
        self.arm_dbg.context_registers()

    do_regs = do_registers

    def do_step(self, line):
        self.arm_dbg.stop_next_instruction = False
        self.arm_dbg.use_step_mode = True
        self.arm_dbg.step_execution()

    # TODO: RF - check for error conditions
    def do_x(self, line):
        """Examine memory similar to GDB x/? command"""
        l = line.split()
        byte_count = l[0]
        address = int(l[1], 16)

        # Read memory as byte, half-word, word
        if byte_count == "b":
            size = 1
        elif byte_count == "h":
            size = 2
        elif byte_count == "w":
            size = 4

        # Print our data
        data = self.arm_dbg.read_mem(address, size)
        data_list = []

        for i in data:
            data_list.append("0x{:02x} ".format(i))

        print " ".join(data_list)

    @options([make_option('-l', '--list', action="store_false", help="List all set breakpoints.")])
    def do_break(self, line):
        pass

    def do_snapshot(self, line):
        """ Load/Save a snapshot """
        l = line.split()
        usage = "snapshot load|save ini|file"
        if len(l) != 3:
            print(usage)
            return

        if l[0] == "load":
            if l[1] == "ini":
                bin_type = armcpu_const.INI_BIN
            elif l[1] == "file":
                bin_type = armcpu_const.SNAPSHOT_BIN
            else:
                print(usage)
                return
        else:
            print(usage)
            return

        self.arm_dbg = armcpu.ArmCPU(l[2], bin_type)
        print(colorful.bold_green("Loaded snapshot: {}".format(l[2])))

    def do_info(self, line):
        pass

    def do_exit(self, line):
        print "Exiting..."
        return True
Example #33
    rps_addin = rvt_journal_writer.write_addin(
        addin_file_path,
        rvt_journal_writer.rps_addin_template,
        rvt_model_version,
    )

    run_proc = rvt_journal_run(rvt_install_path, journal_file_path,
                               paths["root_dir"])
    run_proc_id = run_proc.pid
    run_proc_name = run_proc.name()

    # let's wait a second for rvt process to fire up
    time.sleep(0.5)

    if run_proc.name() == "Revit.exe":
        proc_name_colored = colorful.bold_green(run_proc_name)
    else:
        proc_name_colored = colorful.bold_red(run_proc_name)

    print(f" process info: {run_proc_id} - {proc_name_colored}")

    print(colorful.bold_orange(f"-detected revit: {rvt_model_version}"))
    print(f" version:{rvt_model_version} at path: {rvt_install_path}")

    print(colorful.bold_orange("-process countdown:"))
    print(
        f" timeout until termination of process: {run_proc_id} - {proc_name_colored}:"
    )

    log_journal = get_rvt_proc_journal(run_proc, paths["journals_dir"])
    return_code = None
Example #34
File: train.py Project: yyht/PRS
def train_model(config, model: MLabReservoir, scheduler: DataScheduler,
                writer: SummaryWriter):
    saved_model_path = os.path.join(config['log_dir'], 'ckpts')

    os.makedirs(saved_model_path, exist_ok=True)

    prev_t = config['data_schedule'][0]['subsets'][0][1]
    done_t_num = 0

    results_dict = dict()
    for step, (x, y, t) in enumerate(scheduler):

        summarize = step % config['summary_step'] == 0
        # if we want to evaluate based on steps.
        evaluate = (step % config['eval_step'] == config['eval_step'] - 1)

        # find current task t's id in data_schedule to obtain the data name.
        for data_dict in config['data_schedule']:
            for subset in data_dict['subsets']:
                if subset[1] == t:
                    cur_subset = subset[0]

        # Evaluate the model when task changes
        if t != prev_t:
            done_t_num += 1
            results_dict = scheduler.eval(model,
                                          writer,
                                          step + 1,
                                          prev_t,
                                          eval_title='eval',
                                          results_dict=results_dict)
            # Save the model
            torch.save(
                model.state_dict(),
                os.path.join(saved_model_path,
                             'ckpt-{}'.format(str(step + 1).zfill(6))))

            print(
                colorful.bold_green('\nProgressing to Task %d' %
                                    t).styled_string)

        if step == 0:
            print(
                colorful.bold_green('\nProgressing to Task %d' %
                                    t).styled_string)

        if done_t_num >= len(scheduler.schedule):
            writer.flush()
            return

        # learn the model
        for i in range(config['batch_iter']):
            if 'slab' in config['model_name']:
                model.learn(x,
                            y,
                            t,
                            step * config['batch_iter'] + i,
                            scheduler.datasets[cur_subset].category_map,
                            scheduler.datasets[cur_subset].split_cats_dict,
                            data_obj=scheduler.datasets[cur_subset])
            else:
                model.learn(x,
                            y,
                            t,
                            step * config['batch_iter'] + i,
                            scheduler.datasets[cur_subset].category_map,
                            scheduler.datasets[cur_subset].split_cats_dict,
                            data_obj=scheduler.datasets[cur_subset].subsets[t])

        prev_t = t