Example #1
0
def func(cmd):
    cmd = [f"{x}" for x in cmd]
    try:
        sp_run(cmd, capture_output=True, check=True, text=True)
    except CalledProcessError as ex:
        fail(
            f"""\nCMD: {" ".join(cmd)}\nSTDOUT: {ex.stdout}\nSTDERR: {ex.stderr}""",
            False,
        )
    except Exception as ex:
        fail(f"""CMD: {" ".join(cmd)}\nEXCEPTION: {ex}""")
Example #2
0
def test_code():
    with open("/tango/src/demo.R", "w") as r_code:
        r_code.write(request.form["code"])
    with open("/tango/testcases/demo.R", "w") as r_test:
        r_test.write(request.form["tests"])
    sp_run([
        "docker", "run", "--rm", "--mount",
        "type=bind,src=/tmp/tango/src,dst=/home/tango/src,readonly=true",
        "--mount",
        "type=bind,src=/tmp/tango/testcases,dst=/home/tango/testcases,readonly=true",
        "--mount", "type=bind,src=/tmp/tango/out,dst=/home/tango/out",
        "awoolley10/tango-demo"
    ])
    with open("/tango/out/validation.json") as v_json:
        validation_result = from_validation(load_json(v_json)["runners"][0])
    if len(validation_result["successes"]) == 3:
        with open("/tango/out/quality.json") as q_json:
            q_res = load_json(q_json)["runners"][0]
        q_e = from_quality(q_res["errors"])  # Quality errors
        q_score = round(100 * q_res["score"])

        with open("/tango/out/evaluation.json") as e_json:
            e_res = load_json(e_json)["runners"][0]
        try:
            e_s = e_res["successes"]
        except KeyError:
            e_s = []
        try:
            e_f = e_res["failures"]
        except KeyError:
            e_f = []
        try:
            e_e = e_res["errors"]
        except KeyError:
            e_e = []
    else:
        q_e = None
        q_score = None
        e_s = None
        e_f = None
        e_e = None

    return render_template("feedback.j2",
                           v_s=validation_result["successes"],
                           v_f=validation_result["failures"],
                           v_e=validation_result["errors"],
                           q_score=q_score,
                           q_e=q_e,
                           e_s=e_s,
                           e_f=e_f,
                           e_e=e_e)
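Example #2 falls back to empty lists when the "successes", "failures", or "errors" keys are missing from the evaluation result; dict.get expresses the same fallback more compactly (a sketch with sample data, not the project's code):

e_res = {"successes": ["test_1"]}     # sample data for illustration
e_s = e_res.get("successes", [])      # ["test_1"]
e_f = e_res.get("failures", [])       # [] because the key is absent
e_e = e_res.get("errors", [])         # []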
Example #3
0
def run_ignition_stage(config_file: str, stage: str):
    print("Running stage %s with config file %s" % (stage, config_file))
    cmd = [
        IGNITION_BINARY_PATH,
        "--platform",
        "file",
        "--stage",
        stage,
        "--log-to-stdout",
    ]
    procenv = os.environ.copy()
    procenv["IGNITION_CONFIG_FILE"] = config_file
    procenv["IGNITION_WRITE_AUTHORIZED_KEYS_FRAGMENT"] = "false"

    sp_run(cmd, env=procenv)
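Example #3 passes a modified copy of the process environment through the env= keyword rather than mutating os.environ. A minimal sketch of the same idea (the variable name and command are placeholders):

import os
from subprocess import run as sp_run

procenv = os.environ.copy()          # copy, so os.environ itself is untouched
procenv["EXAMPLE_FLAG"] = "1"        # hypothetical variable for illustration
sp_run(["env"], env=procenv)         # the child sees EXAMPLE_FLAG=1 (Unix 'env' command)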
Example #4
0
def patch_notes_handler(self, repo_cog_hash_pairs):
    for repo, cog, oldhash in repo_cog_hash_pairs:
        repo_path = os.path.join('data', 'downloader', repo)
        cogfile = os.path.join(cog, cog + ".py")
        cmd = ["git", "-C", repo_path, "log", "--relative-date",
               "--reverse", oldhash + '..', cogfile
               ]
        try:
            log = sp_run(cmd, stdout=PIPE).stdout.decode().strip()
            yield self.format_patch(repo, cog, log)
        except:
            pass
Example #5
0
def patch_notes_handler(self, repo_cog_hash_pairs):
    for repo, cog, oldhash in repo_cog_hash_pairs:
        repo_path = os.path.join('data', 'downloader', repo)
        cogfile = os.path.join(cog, cog + ".py")
        cmd = ["git", "-C", repo_path, "log", "--relative-date",
               "--reverse", oldhash + '..', cogfile
               ]
        try:
            log = sp_run(cmd, stdout=PIPE).stdout.decode().strip()
            yield self.format_patch(repo, cog, log)
        except:
            pass
Example #6
0
def patch_notes_handler(self, repo_cog_hash_pairs):
    for repo, cog, oldhash in repo_cog_hash_pairs:
        pathsplit = self.repos[repo][cog]['file'].split('/')
        repo_path = os.path.join(*pathsplit[:-2])
        cogfile = os.path.join(*pathsplit[-2:])
        cmd = ["git", "-C", repo_path, "log", "--relative-date",
               "--reverse", oldhash + '..', cogfile
               ]
        try:
            log = sp_run(cmd, stdout=PIPE).stdout.decode().strip()
            yield self.format_patch(repo, cog, log)
        except:
            pass
Example #7
0
def patch_notes_handler(self, repo_cog_hash_pairs):
    for repo, cog, oldhash in repo_cog_hash_pairs:
        pathsplit = self.repos[repo][cog]['file'].split('/')
        repo_path = os.path.join(*pathsplit[:-2])
        cogfile = os.path.join(*pathsplit[-2:])
        cmd = [
            "git", "-C", repo_path, "log", "--relative-date", "--reverse",
            oldhash + '..', cogfile
        ]
        try:
            log = sp_run(cmd, stdout=PIPE).stdout.decode().strip()
            yield self.format_patch(repo, cog, log)
        except:
            pass
Example #8
0
def run_pipe(iargs):
    """Runs a SPARK sub-pipeline.
    """

    jobs_pattern = ' '.join([quote(s) for s in iargs['jobs_pattern']])
    cmd = '{} run {} {} {}'.format(
        quote(iargs['exe']), quote(iargs['pipe_file']), iargs['stage'], jobs_pattern)
    p = sp_run(cmd, shell=True, cwd=iargs['out_dir'])
    if p.returncode != 0:
        print('\n\nThe process returned a non-zero exit status:\n' +
              str(p.returncode), file=stderr)
        sys_exit(1)

    return None
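Example #8 builds a single command string and runs it with shell=True, so every user-supplied piece is escaped with shlex.quote first. A short sketch of that quoting step (the filenames are placeholders); when a shell is not actually needed, passing an argument list without shell=True avoids the quoting problem entirely:

from shlex import quote
from subprocess import run as sp_run

args = ["my file.txt", "weird;name.txt"]                  # placeholder filenames
cmd = "ls -l {}".format(" ".join(quote(a) for a in args))
p = sp_run(cmd, shell=True)                               # quoted, so spaces and ';' are safe
if p.returncode != 0:
    print("command failed with exit status", p.returncode)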
Example #9
0
def _OK_completed_process_for_version_check(listener):
    from subprocess import run as sp_run

    e = None
    try:
        cp = sp_run((_EXECUTABLE_NAME, '--version'), capture_output=True)
    except FileNotFoundError as exception:
        e = exception
    if e:
        return _when_not_installed(listener, e)

    if cp.returncode:
        return _when_couldnt(listener, 'check version', cp)

    return cp
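Example #9 detects a missing executable by catching FileNotFoundError from subprocess.run; shutil.which is a complementary way to pre-check that a program is on PATH (a sketch, using git only as an illustrative executable):

from shutil import which
from subprocess import run as sp_run

exe = which("git")                   # None when the executable is not on PATH
if exe is None:
    print("git is not installed")
else:
    cp = sp_run([exe, "--version"], capture_output=True, text=True)
    print(cp.returncode, cp.stdout.strip())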
Example #10
0
    def is_reachable(self):
        '''
        Check whether the network device is reachable (basically a ping test).

        The ping command assumes that this method will be executed on a
        Linux/Unix type computer.
        '''
        result = False
        ping_cmd = ['ping', '-c', self.count, '-W', self.wait_time, '-s',
                    self.packet_size, self.ipaddress]
        spr_cp = sp_run(ping_cmd, stdout=sp_PIPE,
                        stderr=sp_PIPE)
        
        if spr_cp.returncode == 0:
            result = True

        return result
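Example #10 treats a zero return code from ping as proof of reachability. subprocess.run also accepts a timeout argument, so a probe that hangs can be cut off; a sketch of that variant (the ping options and the 5-second limit are illustrative):

from subprocess import run as sp_run, DEVNULL, TimeoutExpired

def is_reachable(ipaddress, count="1", wait_time="1"):
    ping_cmd = ["ping", "-c", count, "-W", wait_time, ipaddress]
    try:
        # timeout= raises TimeoutExpired if ping runs longer than 5 seconds
        cp = sp_run(ping_cmd, stdout=DEVNULL, stderr=DEVNULL, timeout=5)
    except TimeoutExpired:
        return False
    return cp.returncode == 0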
Example #11
0
def check_gram_binary():
    if not Path(gramtools_exec_fpath).exists():
        print(
            f"gramtools backend expected at: {gramtools_exec_fpath} but not found.",
            file=sys.stderr,
        )
        exit(1)
    process = sp_run(
        [gramtools_exec_fpath], stdout=PIPE, stderr=PIPE, universal_newlines=True
    )
    if process.returncode != 0:
        print(
            f"The gramtools backend at {gramtools_exec_fpath} does not seem to work.\n\n"
            f"Stdout: \n{process.stdout.strip()}\n\n",
            f"Stderr: \n{process.stderr.strip()}\n",
            file=sys.stderr,
        )
        exit(1)
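Example #11 requests decoded str output with universal_newlines=True; on Python 3.7+ the text=True keyword is the equivalent, more readable spelling (a minimal sketch with an arbitrary command):

from subprocess import run as sp_run, PIPE

# text=True behaves like universal_newlines=True: stdout/stderr arrive as str
process = sp_run(["echo", "hello"], stdout=PIPE, stderr=PIPE, text=True)
print(process.returncode, process.stdout.strip())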
Example #12
0
def setup_pipes(iargs):
    """Builds the list of options for running the SPARK analyses with GNU Octave or MATLAB and 
    creates the corresponding full SPARK pipeline files.
    """

    out_dir = os.sep.join([iargs['out_dir'], iargs['fmri'][0]])
    make_dirs(out_dir)

    pipes_dir = os.sep.join([out_dir, 'pipelines'])
    make_dirs(pipes_dir)

    pipe_opt = os.sep.join([pipes_dir, iargs['fmri'][0] + '.opt'])
    with open(pipe_opt, 'w', newline='\n') as file:
        file.write(
            'pipe_file ' + pipe_opt[:-4] + '.mat' + '\n' +
            'fmri_data ' + ' '.join(iargs['fmri'][1:]) + '\n' +
            'out_dir ' + out_dir + '\n' +
            'mask ' + iargs['mask'] + '\n' +
            'nb_resamplings ' + str(iargs['nb_resamplings']) + '\n' +
            'network_scales ' + ' '.join([str(x) for x in iargs['network_scales']]) + '\n' +
            'nb_iterations ' + str(iargs['nb_iterations']) + '\n' +
            'p_value ' + str(iargs['p_value']) + '\n' +
            'resampling_method ' + iargs['resampling_method'] + '\n' +
            'block_window_length ' + ' '.join([str(x) for x in iargs['block_window_length']]) + '\n' +
            'dict_init_method ' + iargs['dict_init_method'] + '\n' +
            'sparse_coding_method ' + iargs['sparse_coding_method'] + '\n' +
            'preserve_dc_atom ' + str(int(iargs['preserve_dc_atom'])) + '\n' +
            'verbose ' + str(int(iargs['verbose'])) + '\n'
        )

    if not os.path.isfile(pipe_opt):
        print('Failed to create/edit the SPARK pipeline options file:\n' +
              pipe_opt, file=stderr)
        sys_exit(1)

    cmd = '{} setup {}'.format(quote(iargs['exe']), quote(pipe_opt))
    p = sp_run(cmd, shell=True, cwd=pipes_dir)
    if p.returncode != 0:
        print('\n\nThe process returned a non-zero exit status:\n' +
              str(p.returncode), file=stderr)
        sys_exit(1)

    return None
Example #13
0
def main(download: bool) -> None:
    log.info(f"VVVVID Downloader {__version__}")

    ffmpeg_path = which("ffmpeg")

    if download and not ffmpeg_path:
        log.critical("FFmpeg non trovato.")
        exit()

    show_id = IntPrompt.ask("ID Show")

    api = Api()
    api.login()

    info = api.info(show_id)

    if not info.exists:
        log.critical("Questo show non esiste.")
        exit()

    log.info(f"Scarico info su [bold]{info.title}[/]...")

    # Get the seasons of the show (this could also contain dubbed versions)
    seasons = api.seasons(show_id)

    # Ask the user the season to download
    questions = [i.name for i in seasons]
    choice = list_input("Seleziona la versione che vuoi scaricare",
                        choices=questions)

    season_flt = list(filter(lambda i: i.name == choice, seasons))
    season = api.season(show_id, season_flt[0].season_id)

    qualities = {}
    questions = []
    if info.video_format.lower() == "sd":
        questions.append("SD")

        if season[0].embed_info:
            qualities.update({"sd": "embed_info"})
        else:
            qualities.update({"sd": "embed_info_sd"})
    else:
        questions.append("HD")
        questions.append("SD")
        qualities.update({"hd": "embed_info", "sd": "embed_info_sd"})

    quality = list_input(f"Seleziona la qualità", choices=questions)
    quality_code = qualities[quality.lower()]

    if len(season) > 1:
        choice = list_input(
            f"{len(season)} episodi disponibili. Li scarico tutti?",
            default="Si",
            choices=["Si", "No"],
        )

        if choice == "No":
            log.info(
                f"Inserisci gli episodi che vuoi scaricare separati da una virgola (,).\nEsempio: 1,4,5"
            )
            answer = None

            while not answer:
                p = Prompt.ask("Episodi", default=None)

                if not p:
                    continue

                answer = [int(i) for i in p.split(",")]

            season = [
                i for index, i in enumerate(season, 1) if index in answer
            ]

    for i in track(season, "Scarico..."):
        show_title = i.show_title
        episode_number = i.number

        embed_code = getattr(i, quality_code, "")
        output_dir = Path().joinpath("vvvvid", show_title)
        output_name = re_sub(r"\s", "_",
                             f"{show_title}_Ep_{episode_number}_{quality}")

        log.info(f"Scarico episodio #{episode_number}...")

        response = i.download(embed_code, output_dir, output_name)

        if response == DownloadResponse.HTTP_FAILED:
            log.critical(
                f"Impossibile scaricare [bold]{show_title} - #{episode_number} (ID: {show_id})[/]."
            )
            continue

        if not download:
            continue

        input_full = output_dir.joinpath(f"{output_name}.m3u8").absolute()
        output_mp4 = output_dir.joinpath(f"{output_name}.mp4").absolute()

        if output_mp4.exists():
            log.warning(f"L'episodio {episode_number} è già stato scaricato.")
            continue

        sp_run(
            [
                ffmpeg_path,  # type: ignore
                "-protocol_whitelist",
                "https,file,tls,tcp",
                "-i",
                input_full,
                "-c",
                "copy",
                "-bsf:a",
                "aac_adtstoasc",
                str(output_mp4),
            ], )

    log.info("Download completato.")
Example #14
0
def run(*args, **kwargs):
    env = os.environ.copy()
    env['GIT_TERMINAL_PROMPT'] = '0'
    kwargs['env'] = env
    return sp_run(*args, **kwargs)
Example #15
0
    def __call__(self):
        user_agent, save_location, ffmpeg_location = self.__config()
        headers = {"User-Agent": user_agent}
        show_id = None

        while not show_id:
            try:
                show_id = int(input(f"{Fore.LIGHTYELLOW_EX}Show ID: {Fore.LIGHTWHITE_EX}"))

                if (show_id < 0):
                    print(f"{Fore.LIGHTRED_EX}[ERRORE] {Fore.LIGHTYELLOW_EX}Non può essere negativo.")
                    show_id = None

            except ValueError:
                print(f"{Fore.LIGHTRED_EX}[ERRORE] {Fore.LIGHTYELLOW_EX}ID non valido.")

        with Session() as s:
            # Get connection ID
            login_response = s.get("https://www.vvvvid.it/user/login", headers=headers)
            conn_id = login_response.json()["data"]["conn_id"]

            # Get info about the show by its ID
            info_response = s.get(f"https://www.vvvvid.it/vvvvid/ondemand/{show_id}/info/?conn_id={conn_id}", headers=headers)
            info_json = info_response.json()

            # If the show ID does not exist, end the script
            if info_json["result"] != "ok":
                message = info_json["message"]
                exit_script(f"{Fore.LIGHTRED_EX}[Errore:] {Fore.LIGHTYELLOW_EX}{message}")

            title = info_json["data"]["title"]
            print(f"{Fore.LIGHTGREEN_EX}[INFO] {Fore.LIGHTYELLOW_EX + title}")

            video_format = info_json["data"]["video_format"]

            # Get the seasons of the show (this could also contain dubbed versions)
            seasons_response = s.get(f"https://www.vvvvid.it/vvvvid/ondemand/{show_id}/seasons/?conn_id={conn_id}", headers=headers)
            seasons_json = seasons_response.json()

            # Ask the user the season to download
            seasons_questions = [i["name"] for i in seasons_json["data"]]
            seasons_choice = inquirer_list(f"{Fore.LIGHTYELLOW_EX}Seleziona la versione che vuoi scaricare{Fore.LIGHTWHITE_EX}", choices=seasons_questions)

            # Get season_id and video_id from the first episode
            season_choice_data = [
                {
                    "season_id": i["episodes"][0]["season_id"],
                    "video_id": i["episodes"][0]["video_id"]
                } for i in seasons_json["data"] if i["name"] == seasons_choice
            ][0]
            season_id = season_choice_data["season_id"]
            video_id = season_choice_data["video_id"]

            # Get the episodes of the selected version
            season_choice_response = s.get(f"https://www.vvvvid.it/vvvvid/ondemand/{show_id}/season/{season_id}?video_id={video_id}&conn_id={conn_id}", headers=headers)
            season_choice_json = season_choice_response.json()
            episodes = season_choice_json["data"]

            available_qualities = {}
            available_qualities_questions = []
            if video_format == "SD":
                available_qualities_questions.append("SD")

                if "embed_info" in episodes[0]:
                    available_qualities.update({"sd": "embed_info"})
                else:
                    available_qualities.update({"sd": "embed_info_sd"})
            else:
                available_qualities_questions.append("HD")
                available_qualities_questions.append("SD")
                available_qualities.update({"hd": "embed_info", "sd": "embed_info_sd"})

            # Ask the user in which quality to download
            quality_choice = inquirer_list(f"{Fore.LIGHTYELLOW_EX}Seleziona la qualità{Fore.LIGHTWHITE_EX}", choices=available_qualities_questions)

            # Get the embed_info code based on the answer of the user
            embed_info = available_qualities[quality_choice.lower()]

            if len(episodes) > 1:
                choice = inquirer_list(f"{Fore.LIGHTYELLOW_EX}Sono disponibili {len(episodes)} episodi. Vuoi che li scarico tutti?{Fore.LIGHTWHITE_EX}", default="Si", choices=["Si", "No"])

                if choice == "No":
                    print(f"{Fore.LIGHTCYAN_EX}[ATTENZIONE] {Fore.RESET} Inserisci gli episodi che vuoi scaricare separati da una virgola (,). Esempio: 1,4,5")
                    answer = None

                    while not answer:
                        answer = input(f"{Fore.LIGHTYELLOW_EX}Episodi: {Fore.LIGHTWHITE_EX}")

                        if len(answer) == 0:
                            print("Devi inserire almeno 1 episodio.")
                            continue

                        answer = answer.split(",")
                        for i in answer:
                            try:
                                int(i)
                            except ValueError:
                                print(f"{Fore.LIGHTRED_EX}[ERRORE] {Fore.LIGHTYELLOW_EX}Devono essere solo numeri.")
                                answer = None
                                break

                        answer = [int(i) for i in answer]

                    episodes = [i for index, i in enumerate(episodes, 1) if index in answer]

            for i in episodes:
                if not i["playable"]:
                    print(f"{Fore.LIGHTRED_EX}[ERRORE] {Fore.LIGHTYELLOW_EX}L'episodio {i['number']} non può essere scaricato.")
                    continue

                url = ds(i[embed_info])

                if i["video_type"] == "video/rcs":
                    url = url.replace("http:", "https:").replace(".net/z", ".net/i").replace("manifest.f4m", "master.m3u8")
                elif i["video_type"] == "video/vvvvid":
                    url = url.replace(url, f"https://or01.top-ix.org/videomg/_definst_/mp4:{url}/playlist.m3u8")

                show_title = i["show_title"]
                episode_number = i["number"]
                output = sub(r"\s", "_", f"{show_title}_Ep_{episode_number}_{quality_choice}.mp4")

                print(f"{Fore.LIGHTGREEN_EX}[INFO] {Fore.LIGHTYELLOW_EX}Sto scaricando l'episodio numero: {Fore.LIGHTWHITE_EX}{episode_number}")

                # Download the episode with FFmpeg, convert the file from .ts to .mp4 and save it
                sp_run([
                    "ffmpeg" if ffmpeg_location == "ffmpeg" else str(Path().joinpath(ffmpeg_location).absolute()),
                    "-loglevel",
                    "fatal",
                    "-i",
                    url,
                    "-c",
                    "copy",
                    "-bsf:a",
                    "aac_adtstoasc",
                    str(Path().joinpath(save_location, output).absolute())
                ])

        print(f"{Fore.LIGHTGREEN_EX}[INFO] {Fore.LIGHTYELLOW_EX}Download completato.")
Example #16
0
    def run(self):
        """Run job."""

        if self.Settings.log > 1:
            print(r'\pagebreak')
            print(' ')
            print('# Run Simulation')

        if self.Settings.log > 1:
            print('\n## Summary of Analysis Parameters')

            print('|Description                          |Parameter|Value|')
            print('|:------------------------------------|:--------|:--|')
            print('|Maximum increments                   |   `incs`|',
                  self.Settings.incs, '|')
            print('|Maximum increment recycles           |   `cycl`|',
                  self.Settings.cycl, '|')
            print('|Maximum Newton-Rhapson iterations    |   `nfev`|',
                  self.Settings.nfev, '|')
            print('|Maximum incremental displacement     |     `du`|',
                  self.Settings.du, '|')
            print('|Maximum incremental LPF              |   `dlpf`|',
                  self.Settings.dlpf, '|')
            print(
                '|Initial control component            |     `j0`|',
                'LPF|' if self.Settings.j0 is None else str(self.Settings.j0) + '|')
            print('|Locked control component             |`j_fixed`|',
                  self.Settings.j_fixed, '|')
            print('|Maximum incremental overshoot        |  `dxtol`|',
                  self.Settings.dxtol, '|')
            print('|Tolerance for x                      |   `xtol`|',
                  self.Settings.xtol, '|')
            print('|Tolerance for f                      |   `ftol`|',
                  self.Settings.ftol, '|')

            if self.Settings.stepcontrol:
                print('\n### Adaptive control for incremental stepwidth')

                print(
                    '|Description                          |Parameter    |Value|'
                )
                print(
                    '|:------------------------------------|:------------|:--|'
                )
                print('|Adaptive control for inc. stepwidth  |`stepcontrol`|',
                      self.Settings.stepcontrol, '|')
                print('|Minimum step size factor             |     `minfac`|',
                      self.Settings.minfac, '|')
                print('|Maximum step size factor             |     `maxfac`|',
                      self.Settings.maxfac, '|')
                print('|Reduce step size factor              |     `reduce`|',
                      1 / self.Settings.reduce, '|')
                print('|Increase step size factor            |   `increase`|',
                      self.Settings.increase, '|')
            print('')

        # measure time
        self.time0_run = time.time()
        self.clock0_run = time.clock()

        # reduced modified displacement vector to active DOF and LPF
        self.Analysis.Vred = np.append(self.Analysis.Ured, 0)
        self.Analysis.lpf = 0

        # init LPF
        # self.Settings.lpf = self.Settings.dlpf
        #self.Analysis.lpf = self.Settings.dlpf

        for step in range(self.Settings.nsteps):

            # maximum number of increment and maximum value per step
            if type(self.Settings.incs) == tuple:
                incs = self.Settings.incs[step]
            else:
                incs = self.Settings.incs
            if type(self.Settings.xlimit[0]) == tuple:
                xlimit = self.Settings.xlimit[step]
            else:
                xlimit = self.Settings.xlimit

            if self.Settings.log > 0: print('\n## Step', step + 1)
            if self.Settings.log > 1:
                print(
                    r'* i(1) is index with 1st-biggest component in abs(Dx/Dx,max).'
                )
                print(
                    r'* i(2) is index with 2nd-biggest component in abs(Dx/Dx,max).'
                )
                print(
                    r'* i(3) is index with 3rd-biggest component in abs(Dx/Dx,max).'
                )
                print(
                    r'* i(4) is index with 4th-biggest component in abs(Dx/Dx,max).'
                )
                print(
                    r'* Value(i) is value of i-th component in abs(Dx/Dx,max).'
                )

                print(
                    r'$$\text{Value}_i = \left|\frac{D_x}{D_{x,max}}\right|_i$$'
                )

            # get reduced external force vector
            #f0red = self.ExtForces.forces[:,3*(step):3*(step+1)].flatten()[self.Analysis.DOF1]
            #self.Analysis.f0red = f0red.reshape(len(f0red),1)
            self.Analysis.ExtForces = copy.deepcopy(self.ExtForces)

            f0_const = np.zeros_like(self.ExtForces.forces[:, 3 * (step):3 *
                                                           (step + 1)])
            for s in range(step):
                f0_const += self.Results.step_lpf_end[
                    s] * self.ExtForces.forces[:, 3 * (s):3 * (s + 1)]
            if step > 0:
                print(
                    '\nconstant part of external forces due to previous step(s)'
                )
                print('    ', f0_const, '\n')
                print('\ninitial values of active DOF due to previous step(s)')
                print('    ', self.Analysis.Vred, '\n')
            self.Analysis.ExtForces.forces_const = f0_const
            self.Analysis.ExtForces.forces = self.ExtForces.forces[:, 3 *
                                                                   (step):3 *
                                                                   (step + 1)]
            f0red = self.Analysis.ExtForces.forces.flatten()[
                self.Analysis.DOF1]
            self.Analysis.f0red = f0red.reshape(len(f0red), 1)

            self.Analysis.step = 1 + step

            res_V, res_a = pathfollow(
                self.equilibrium,
                self.stiffness,
                self.Analysis.Ured,
                self.Analysis,
                dxmax=[self.Settings.du, self.Settings.dlpf],
                j=self.Settings.j0,
                j_fixed=self.Settings.j_fixed,
                j_pre=self.Settings.j_pre,
                xlimit=xlimit,
                incs=incs,
                nfev=self.Settings.nfev,
                cycl=self.Settings.cycl,
                ftol=10**-self.Settings.ftol,
                xtol=10**-self.Settings.xtol,
                stepcontrol=self.Settings.stepcontrol,
                maxfac=self.Settings.maxfac,
                minfac=self.Settings.minfac,
                reduce=self.Settings.reduce,
                increase=self.Settings.increase,
                dxtol=self.Settings.dxtol,
                verbose=self.Settings.log)

            print(r'\pagebreak')
            print(' ')
            print(
                '\n### Create result object from analysis results for step {0:3d}\n'
                .format(1 + step))
            for i, (r_V, r_a) in enumerate(zip(res_V[1:], res_a[1:])):
                print('    write result {0:3d}/{1:3d} (LPF: {2:10.4g})'.format(
                    1 + i, len(res_V[1:]), r_a.lpf))
                self.Results.R[-1] = r_a
                self.Results.copy_increment()

            # copy initial U0
            self.Results.R[-1].U0 = np.copy(self.Results.R[-1].U)

            # append last lpf value
            self.Results.step_lpf_end.append(self.Results.R[-1].lpf)

            # reset LPF for new step
            if step + 1 < self.Settings.nsteps:
                self.Analysis.Vred[-1] = 0.0
                self.Analysis.lpf = 0.0
            else:
                self.Results.remove_last_increment()

            if self.Settings.log > 0: print('\nEnd of Step', step + 1)

        # duplicate first increment to get right indices
        self.Results.duplicate_first_increment()

        time_dclock_run = time.clock() - self.clock0_run
        time_dtime_run = time.time() - self.time0_run
        time_dclock_build = self.clock1_build - self.clock0_build
        time_dtime_build = self.time1_build - self.time0_build
        print(r'\pagebreak')
        print(' ')
        print('\n## Job duration')
        print(
            'Time measurement for execution times of "Model.build()" and "Model.run()".\n'
        )
        print('    total  cpu time "build": {:10.3f} seconds'.format(
            time_dclock_build))
        print('    total wall time "build": {:10.3f} seconds\n'.format(
            time_dtime_build))
        print('    total  cpu time "run":   {:10.3f} seconds'.format(
            time_dclock_run))
        print('    total wall time "run":   {:10.3f} seconds\n'.format(
            time_dtime_run))

        if self.logfile:
            sys.stdout = self.stdout
            sp_run([
                'pandoc', self.logfile_name + '.md', '-t', 'latex', '-o',
                self.logfile_name + '.pdf'
            ])
            if self.Settings.logpdf:
                sp_run([
                    'pandoc', self.logfile_name + '.md', '-t', 'html', '-s',
                    '-o', self.logfile_name + '.html'
                ])
Example #17
0
import sys

from json import dump as json_dump
from os.path import abspath
from subprocess import run as sp_run

from tango import Linter
from tango import Validator

if __name__ == '__main__':
    config_yaml_path = abspath(sys.argv[1])

    v_res = Validator.validate(config_yaml_path)
    with open("/home/tango/out/validation.json", "w") as v_file:
        json_dump(v_res, v_file)

    if v_res["runners"][0]["errors"] or v_res["runners"][0]["failures"]:
        # Code is syntactically incorrect or using forbidden libs/funcs
        # Exit early
        exit(-1)

    l_res = Linter.lint(config_yaml_path)
    with open("/home/tango/out/quality.json", "w") as q_file:
        json_dump(l_res, q_file)

    # Actual test evaluation is handled entirely in R
    sp_run(["Rscript", "/home/tango/tester.R"], timeout=60)