Example #1
def create_pr(repo, from_branch, to_branch):
    print(f"Creating PR for {from_branch}")
    utils.subprocess_check_output([
        "gh", "pr", "create", "-R", repo, "--base", to_branch, "--head",
        from_branch, "--title", "Update CSV framework coverage reports",
        "--body", "This PR changes the CSV framework coverage reports."
    ])
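A minimal usage sketch, assuming the surrounding script provides the `utils` wrapper around `subprocess` and an authenticated `gh` CLI; the repository and branch names below are made up for illustration.

# Hypothetical values; the real workflow derives these from its environment.
repo = "github/example-repo"
create_pr(repo, "csv-coverage-updates", "main")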
Example #2
def get_previous_run_id(repo, run_id, pr_number):
    """
    Gets the previous run id for a given workflow run, considering that the previous workflow run needs to come from the same PR.
    """

    # Get branch and repo from run:
    this_run = utils.subprocess_check_output([
        "gh", "api", "-X", "GET", f"repos/{repo}/actions/runs/{run_id}",
        "--jq",
        "{ head_branch: .head_branch, head_repository: .head_repository.full_name }"
    ])

    this_run = json.loads(this_run)
    pr_branch = this_run["head_branch"]
    pr_repo = this_run["head_repository"]

    # Get all previous runs that match branch, repo and workflow name:
    output = utils.subprocess_check_output([
        "gh", "api", "-X", "GET", f"repos/{repo}/actions/runs", "-f",
        "event=pull_request", "-f", "status=success", "-f",
        f"branch='{pr_branch}'", "--paginate", "--jq",
        f'[.workflow_runs.[] | select(.head_repository.full_name=="{pr_repo}" and .name=="{artifacts_workflow_name}")] | sort_by(.id) | reverse | [.[].id]'
    ])

    ids = []
    ids = []
    for line in output.splitlines():
        ids.extend(json.loads(line))

    if not ids or ids[0] != int(run_id):
        raise Exception(
            f"Expected to find {run_id} in the list of matching runs.")

    for previous_run_id in ids[1:]:
        utils.download_artifact(repo, "pr", "prev_run_pr", previous_run_id)

        try:
            with open("prev_run_pr/NR") as file:
                prev_pr_number = int(file.read())
                print(f"PR number: {prev_pr_number}")
        finally:
            if os.path.isdir("prev_run_pr"):
                shutil.rmtree("prev_run_pr")

        # the previous run needs to be coming from the same PR:
        if pr_number == prev_pr_number:
            return int(previous_run_id)

    raise Exception("Couldn't find previous run.")
Example #3
def split_video(video_path, folder_path, splits):
    """
    video_path => full path to the video
    folder_path => full path to the output folder
    splits => list of dicts with 'start' and 'duration' keys used for splitting
    """
    basename = os.path.split(video_path)[-1]
    extension = ''.join(pathlib.Path(basename).suffixes)
    videoname = basename.split('.')[0]
    
    # Build a single ffmpeg command that cuts every requested segment in one pass.
    split_cmd = ['ffmpeg', '-nostdin', '-y', '-i', video_path]
    for i, split in enumerate(splits):
        file_name = f'{videoname}-{i}{extension}'
        file_path = os.path.join(folder_path, file_name)
        split_cmd += ['-ss', split['start'], '-t', split['duration'], '-codec', 'copy', file_path]
    ret, failed = subprocess_check_output(split_cmd)
    return ret, failed
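A small usage sketch, assuming `ffmpeg` is on PATH and `subprocess_check_output` is the project's wrapper returning a `(ret, failed)` pair; paths and timestamps are placeholders.

# start/duration are passed straight to ffmpeg, so 'HH:MM:SS' strings (or seconds) work.
splits = [
    {'start': '00:00:00', 'duration': '00:05:00'},
    {'start': '00:05:00', 'duration': '00:03:30'},
]
ret, failed = split_video('/data/lecture.mp4', '/data/segments', splits)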
Example #4
def combine_videos(output_path, folder_path):
    files = []
    for file in os.listdir(folder_path):
        file_name = os.path.join(folder_path, file)
        extension = ''.join(pathlib.Path(file_name).suffixes)
        if 'mp4' in extension:
            files.append(os.path.join(folder_path, file))
    files = natsorted(files)
    print(files)
    ffmpeg_txt = os.path.join(folder_path, 'ffmpeg_split.txt')
    with open(ffmpeg_txt, 'w') as tf:
        for file in files:
            # Quote the path so the concat demuxer handles spaces and special characters.
            tf.write(f"file '{file}'\n")

    join_videos = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', ffmpeg_txt, '-c', 'copy', output_path]
    ret, failed = subprocess_check_output(join_videos)
    return ret, failed
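A usage sketch under the same assumptions (`natsorted` comes from the natsort package, `ffmpeg` is on PATH); the paths are placeholders.

# Concatenates the naturally sorted .mp4 segments from /data/segments into one file.
ret, failed = combine_videos('/data/segments/output.mp4', '/data/segments')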
Example #5
def get_pr_number(repo, owner, from_branch, to_branch):
    ids = utils.subprocess_check_output([
        "gh", "api", "-X", "GET", f"repos/{repo}/pulls", "-f",
        f"name={to_branch}", "-f", f"head={owner}:{from_branch}", "--jq",
        "[.[].number]"
    ])

    ids = json.loads(ids)

    if len(ids) > 1:
        print(f"Found more than one PR that matches the branches. {ids}",
              file=sys.stderr)
        sys.exit(1)

    if len(ids) == 1:
        print(f"Matching PR found: {ids[0]}")
        return ids[0]

    return 0
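A hypothetical call, assuming an authenticated `gh` CLI; by construction a return value of 0 means no open PR matches the given head and base branches.

pr_number = get_pr_number("github/example-repo", "octocat", "csv-coverage-updates", "main")
if pr_number == 0:
    print("No matching PR is currently open")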
Example #6
def get_output(arr):
    r = utils.subprocess_check_output(arr)
    return r.strip("\n'")
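For example, the helper could capture a single-line git value, assuming `utils.subprocess_check_output` returns the command's stdout as a string:

# Strips the trailing newline (and stray quotes) from the captured output.
current_branch = get_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
print(f"On branch {current_branch}")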
Example #7
working_dir = ""
if len(sys.argv) > 1:
    working_dir = sys.argv[1]
else:
    print("Working directory is not specified")
    exit(1)

found_diff = False
overwrite_files()

os.chdir(working_dir)

already_open_pr = get_pr_number(repo, owner, branch_name, main)
try:
    utils.subprocess_check_output(["git", "diff", "--exit-code"])
    print("No differences found")
    found_diff = False
except Exception:
    print("Differences found")
    found_diff = True

if not found_diff:
    if already_open_pr != 0:
        # close the PR
        utils.subprocess_run(
            ["gh", "pr", "close",
             str(already_open_pr), "-R", repo])
else:
    utils.subprocess_run(["git", "config", "user.name", "github-actions[bot]"])
    utils.subprocess_run([
Example #8
        print("====================================== warning: no notes for " +
              str(univ2lect))
else:
    print('skipping notes')

if TRIM_TRACKING is True:
    if len(fielddat['tracking_data']) > 4:
        in_file = fielddat['tracking_data']
        dl_path = f'{tmpdir}/{in_file}'
        full_key = f'{univ2lect_key}/{in_file}'
        s3_download_file(bucket, full_key, dl_path)

        subprocess_check_output([
            'python3',
            os.path.join(thispath, 'trim_tracking_json.py'),
            tmpdir + fielddat['tracking_data'], '-ss',
            str(args.trimstart), '-te',
            str(args.trimend)
        ])

        out_file = fielddat['tracking_data'][:-len('.json')] + '_trimmed.json'
        out_path = f'{tmpdir}/{out_file}'
        assert os.path.isfile(out_path), out_path

        getorpostcontent(
            'tracking_data',
            fielddat['tracking_data'][:-len('.json')] + '_trimmed.json')
        full_key = f'{univ2lect_key}/{out_file}'
        s3_upload_file(bucket, full_key, out_path)
    else:
        print(
Example #9
def download_and_trim_contours(bucket: str, key: str, localpath: str, trimstart: float = -1.,
                               trimend: float = -1., trimduration: float = -1.,
                               outdir: str = '', refilter: bool = False, pickle_params: str = ''):

    s3_download_folder(bucket, key, localpath)

    if trimend < 0. and trimduration < 0.:
        trimend      = int(1e9)
        trimduration = int(1e9)
    else:
        assert not (trimend > 0. and trimduration > 0.), 'use one or the other'
        if trimduration > 0.:
            trimend = trimstart + trimduration
        else:
            trimduration = trimend - trimstart
            assert trimduration > 0., str(trimend)+', '+str(trimstart)

    tsfileend = '_timestamps.png'

    #----------------------
    tmpdir = localpath
    while tmpdir.endswith('/'):
        tmpdir = tmpdir[:-1]
    subprocess_call(['mkdir','-p', tmpdir])

    #----------------------
    basecf = localpath.split('/')[-1]
    backupbaseblockfold = copy(basecf)
    if len(outdir) < 1:
        ival = None
        if '_' in basecf:
            try:
                ival = int(basecf.split('_')[-1])
            except ValueError:
                pass
        if isinstance(ival,int):
            ival += 1
            basecf = '_'.join(basecf.split('_')[:-1])+'_'+str(ival)
            # TODO: s3 head to check if this folder exists already
            print("ayyy: "+str(ival))
        else:
            basecf += '_0'
        outdir = basecf

    assert '/' not in outdir, str(outdir)

    print("outdir: "+str(outdir))
    
    localoutdir = os.path.join(os.path.dirname(tmpdir), outdir)
    subprocess_call(['mkdir','-p', localoutdir])
    
    univ2lect = '/'.join(key.split('/')[:-2])
    full_key = f'{univ2lect}/{outdir}'
    
    print("localoutdir: "+str(localoutdir))
    print("s3 out key: "+str(full_key))
    
    pngs = [os.path.join(tmpdir,ff) for ff in os.listdir(tmpdir) if ff.endswith(tsfileend)]
    assert len(pngs) > 0, str(pngs)+'\n'+str(list(os.listdir(tmpdir)))
    
    subtrme = int(round(float(trimstart)))
    maxtime = int(round(float(trimend  )))
    
    syncme = []

    for pngf in pngs:
        img = read_timestamps_image(pngf)
        assert isinstance(img,np.ndarray), str(type(img))
        assert len(img.shape) == 2, str(img.shape)
        describe(os.path.basename(pngf), img)
        if np.amin(img) > int(round(trimend)):
            continue
        # TODO: if we know erase times, remove contours which were erased before "trimstart"
        img[img<subtrme] = subtrme
        img[img>maxtime] = maxtime # TODO: this is needlessly destructive; but fixes potential frontend bugs (seek past end of video?)
        img -= subtrme

        writer = png.Writer(width=img.shape[1], height=img.shape[0], greyscale=True, alpha=False, bitdepth=16, compression=5)
        outfname = os.path.join(localoutdir,os.path.basename(pngf))
        syncme.append(outfname)
        with open(outfname, 'wb') as openedfile:
            writer.write(openedfile, img)
            
    for fpth in syncme:
        oldimgf = os.path.join(os.path.dirname(fpth), os.path.basename(fpth)[:-len(tsfileend)])
        oldfs = [os.path.join(tmpdir,ff) for ff in os.listdir(tmpdir) if not ff.endswith(tsfileend) and ff.startswith(os.path.basename(oldimgf))]
        assert len(oldfs) == 1, str(oldimgf)+'\n'+str(oldfs)
        oldfs = oldfs[0]
        subprocess_check_output(['cp', oldfs, localoutdir+'/'])
    
    metafiles = [os.path.join(tmpdir,ff) for ff in os.listdir(tmpdir) if ff.lower().startswith('meta') and ff.lower().endswith('.json')]
    
    for ff in metafiles:
        print("in meta.json, replacing \'"+str(backupbaseblockfold)+"\' with \'"+str(outdir)+"\'")
        replace_folder_in_metafile(ff, localoutdir+'/'+os.path.basename(ff), backupbaseblockfold, outdir)
    
    # TODO: ADD THIS FUNCTIONALITY BACK
    test_it = False
    if refilter and test_it is True:
        filterfile = '/evt/interactive-writing-segmentation/filter_keyframes.py'
        assert os.path.isfile(filterfile), filterfile
        fargs = ['python',filterfile,localoutdir,'--were_transparency_on_s3','--overwrite_in_place']
        if len(pickle_params) > 1:
            fargs += ['--autorun','--pickle_params',pickle_params]
        assert 0 == subprocess_call(fargs)

    print("syncing resulting folder")
    
    while full_key.endswith('/'):
        full_key = full_key[:-1]
    
    s3_upload_folder(bucket, full_key, localoutdir)
    return outdir
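A hedged invocation sketch; the bucket, key, and local path are placeholders, and it assumes the `s3_*` helpers, `png`, `numpy`, and the rest of the module's utilities are importable and AWS credentials are configured.

# Trim the first 12 seconds off a contour folder and upload the result as a sibling folder.
new_outdir = download_and_trim_contours(
    bucket='example-lecture-bucket',
    key='univ/course/lecture01/blocks/contours_0',
    localpath='/tmp/contours_0',
    trimstart=12.,
    trimduration=600.,
)
print('uploaded trimmed folder as', new_outdir)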