Пример #1
0
    def on_connectBtn_clicked(self):
        """Authenticate against the server; on success, hide the login form,
        build one tab per accessible project, and center the window.

        On failure (no token returned), shows a critical message box and
        leaves the login form visible.
        """
        token = pytator.Auth.getToken(self.url, self.ui.username_field.text(),
                                      self.ui.password_field.text())
        if token is None:
            logging.warning("Access Denied")
            QtWidgets.QMessageBox.critical(
                self, "Access Denied",
                "Please check your username and password.")
        else:
            self.ui.login_widget.setVisible(False)

            # Project-less connection, used only to enumerate projects.
            tator = pytator.Tator(self.url, token, None)
            projects = tator.Project.all()
            # TODO landing page
            self.ui.tabWidget.addTab(QtWidgets.QWidget(self), "Welcome")
            for project in projects:
                self.ui.tabWidget.addTab(
                    ProjectDetail(self, self.background_thread, self.url,
                                  token, project['id']), project['name'])
            self.ui.tabWidget.setVisible(True)
            self.adjustSize()
            # Center the window on the screen. QWidget.move() requires
            # integer coordinates; the original '/' division produced
            # floats, which PyQt5 rejects with a TypeError. Use floor
            # division instead. (Also renamed: the second offset is the
            # top margin, not a right margin.)
            screenGeometry = QtWidgets.QApplication.desktop().screenGeometry()
            marginLeft = (screenGeometry.width() - self.width()) // 2
            marginTop = (screenGeometry.height() - self.height()) // 2
            self.move(marginLeft, marginTop)
Пример #2
0
    def __init__(self, parent, backgroundThread, url, token, projectId):
        """Set up the project detail tab widget.

        Args:
            parent: Parent QWidget.
            backgroundThread: Shared worker thread for long-running jobs.
            url: Server REST endpoint.
            token: Authentication token for the API.
            projectId: Numeric ID of the project this tab displays.
        """
        super(ProjectDetail, self).__init__(parent)
        self.background_thread = backgroundThread
        self.ui = Ui_ProjectDetail()
        # setupUi must run before any self.ui.* widget access below.
        self.ui.setupUi(self)
        self.project_id = projectId
        # Project-scoped API handle used by this tab's data operations.
        self.tator = pytator.Tator(url, token, self.project_id)
        self.ui.sectionTree.setHeaderLabel("Media Files")
        # Enable multiple selections
        self.ui.sectionTree.setSelectionMode(
            QtWidgets.QTreeWidget.MultiSelection)

        self.ui.downloadBtn.setIcon(QtGui.QIcon(QT_DOWNLOAD_PATH))
        self.ui.uploadBtn.setIcon(QtGui.QIcon(QT_UPLOAD_PATH))
        self.ui.searchBtn.setIcon(QtGui.QIcon(QT_SEARCH_PATH))

        # Upload button is always active, download requires selection
        self.ui.uploadBtn.setEnabled(True)
        self.ui.downloadBtn.setEnabled(False)

        # Selection changes are handled by onSelectionChanged (defined
        # elsewhere in this class; presumably it toggles downloadBtn).
        self.ui.sectionTree.itemSelectionChanged.connect(
            self.onSelectionChanged)

        # Both Enter in the search box and the search button re-query.
        self.ui.searchEdit.returnPressed.connect(self.refreshProjectData)
        self.ui.searchBtn.clicked.connect(self.refreshProjectData)
Пример #3
0
def test_stategraphic(url, token, project, video, box_type, track_type):
    """Create a ten-box track and validate its rendered state graphic."""
    api = pytator.Tator(url, token, project)
    media = api.Media.get(pk=video)

    # One localization per frame for the track to reference.
    status, response = api.Localization.new(
        [_make_box(project, box_type, video, f) for f in range(10)])
    print(f"New localization response: {response}")
    assert status == 201
    box_ids = response['id']

    # Create the track pointing at all of those boxes.
    status, response = api.State.new([{
        'project': project,
        'type': track_type,
        'media_ids': [video],
        'localization_ids': box_ids,
    }])
    assert status == 201
    track_id = response['id'][0]

    # Fetch the state graphic and sanity-check every frame's shape.
    code, stategraphic = api.StateGraphic.get_bgr(track_id)
    assert code == 200
    assert len(stategraphic) == 10
    for frame_data in stategraphic:
        assert_vector_equal(frame_data.shape, (720, 1280, 3))
Пример #4
0
def test_get_audio(url, token, project, video):
    """Verify the test video carries at least one AAC audio stream."""
    api = pytator.Tator(url, token, project)
    media = api.Media.get(pk=video)

    audio_streams = media['media_files'].get('audio', [])
    assert len(audio_streams) > 0
    assert audio_streams[0]['codec'] == 'aac'
Пример #5
0
def test_state_crud(url, token, project, video_type, video, state_type):
    """Exercise create/read/update/delete for states, singly and in bulk."""
    api = pytator.Tator(url, token, project)
    media = api.Media.get(pk=video)

    # Fields expected to differ between a patch payload and the stored object.
    exclude = ['project', 'type', 'media_ids', 'id', 'meta', 'user', 'frame']

    # Bulk create a random-sized batch of states.
    count = random.randint(2000, 10000)
    code, resp = api.State.new([
        random_state(project, state_type, media, post=True)
        for _ in range(count)
    ])
    print(f"New state response: {resp}")
    assert code == 201

    # Create one more state individually.
    single = random_state(project, state_type, media, post=True)
    code, resp = api.State.new([single])
    state_id = resp['id'][0]
    assert code == 201

    # Patch that single state.
    patch = random_state(project, state_type, media)
    code, resp = api.State.update(state_id, patch)
    assert code == 200

    # Read it back and compare against the patch payload.
    assert_close_enough(patch, api.State.get(state_id), exclude)

    # Delete the single state.
    assert api.State.delete(state_id) == 200

    # ES can be slow at indexing so wait for a bit.
    time.sleep(5)

    # Bulk-update attributes on everything that remains.
    template = random_state(project, state_type, media)
    bulk_patch = {'attributes': template['attributes']}
    params = {'media_id': video, 'type': state_type}
    code, resp = api.State.bulk_update(params, bulk_patch)
    assert code == 200

    # Every remaining state should reflect the bulk patch.
    for state in api.State.filter(params):
        assert_close_enough(bulk_patch, state, exclude)

    # Bulk delete, then confirm nothing is left.
    assert api.State.bulk_delete(params) == 200
    time.sleep(1)
    assert api.State.filter(params) == []
Пример #6
0
def test_get_file(url, token, project, video):
    """Download the media file and confirm it lands on disk."""
    api = pytator.Tator(url, token, project)
    media = api.Media.get(pk=video)

    with tempfile.TemporaryDirectory() as scratch:
        destination = os.path.join(scratch, "video.mp4")
        api.Media.downloadFile(media, destination)
        assert os.path.exists(destination)
Пример #7
0
def test_get_frame(url, token, project, video):
    """Fetch three frames as BGR arrays and sanity-check their shapes."""
    api = pytator.Tator(url, token, project)
    media = api.Media.get(pk=video)

    code, frames_bgr = api.GetFrame.get_bgr(video, [50, 100, 150])

    assert code == 200
    assert len(frames_bgr) == 3
    for frame in frames_bgr:
        assert_vector_equal(frame.shape, (720, 1280, 3))
Пример #8
0
def video_type(request, project):
    """Pytest fixture yielding a throwaway video MediaType ID."""
    import pytator
    url = request.config.option.url
    token = request.config.option.token
    api = pytator.Tator(url, token, project)
    _, response = api.MediaType.new({
        'name': 'video_type',
        'description': 'Test video type',
        'project': project,
        'dtype': 'video',
    })
    type_id = response['id']
    yield type_id
    # Teardown: remove the type created above.
    api.MediaType.delete(type_id)
Пример #9
0
def project(request):
    """ Project ID for a created project. """
    import pytator
    url = request.config.option.url
    token = request.config.option.token
    api = pytator.Tator(url, token, None)
    # Timestamp the name so repeated runs never collide.
    now = datetime.datetime.now()
    stamp = now.strftime('%Y_%m_%d__%H_%M_%S')
    _, response = api.Project.new({
        'name': f'test_project_{stamp}',
        'summary': f'Test project created by pytator unit tests on {now}',
    })
    project_id = response['id']
    yield project_id
    # Teardown: remove the project created above.
    api.Project.delete(project_id)
Пример #10
0
def video(request, project, video_type):
    """Pytest fixture yielding the ID of an uploaded test video.

    Downloads a small public sample clip (cached in /tmp), uploads it as
    media of type ``video_type``, and deletes the media on teardown.
    """
    import pytator
    url = request.config.option.url
    token = request.config.option.token
    tator = pytator.Tator(url, token, project)
    out_path = '/tmp/ForBiggerEscapes.mp4'
    if not os.path.exists(out_path):
        # Use a distinct name so the REST `url` above is not shadowed.
        sample_url = ('http://commondatastorage.googleapis.com/'
                      'gtv-videos-bucket/sample/ForBiggerEscapes.mp4')
        with requests.get(sample_url, stream=True) as r:
            r.raise_for_status()
            with open(out_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
    video_id = tator.Media.uploadFile(video_type, out_path)
    yield video_id
    # Bug fix: teardown must delete the *media* record. The original
    # called MediaType.delete(video_id), which targets the wrong endpoint
    # (the type is created and torn down by the video_type fixture).
    status = tator.Media.delete(video_id)
Пример #11
0
def test_temporary_file(url, token, project):
    """Round-trip a temporary file: upload it, list it, download it back."""
    api = pytator.Tator(url, token, project)

    # The project starts with no temporary files.
    existing = api.TemporaryFile.all()
    assert existing is not None
    assert len(existing) == 0

    # Upload a small text file and verify it shows up in the listing.
    with tempfile.NamedTemporaryFile(mode='w', suffix=".txt") as temp:
        temp.write("foo")
        temp.flush()
        api.TemporaryFile.uploadFile(temp.name)
        assert len(api.TemporaryFile.all()) == 1

    # Download it back and confirm the contents survived the round trip.
    with tempfile.TemporaryDirectory() as scratch:
        local_path = os.path.join(scratch, "foo.txt")
        element = api.TemporaryFile.all()[0]
        api.TemporaryFile.downloadFile(element, local_path)
        with open(local_path, 'r') as fh:
            assert fh.read() == "foo"
Пример #12
0
def track_type(request, project, video_type):
    """Pytest fixture yielding a throwaway localization-associated StateType ID."""
    import pytator
    url = request.config.option.url
    token = request.config.option.token
    api = pytator.Tator(url, token, project)
    _, response = api.StateType.new({
        'name': 'track_type',
        'description': 'Test track type',
        'project': project,
        'media_types': [video_type],
        'association': 'Localization',
        'attribute_types': make_attribute_types(),
    })
    type_id = response['id']
    yield type_id
    # Teardown: remove the type created above.
    api.StateType.delete(type_id)
Пример #13
0
def box_type(request, project, video_type):
    """Pytest fixture yielding a throwaway box LocalizationType ID."""
    import pytator
    url = request.config.option.url
    token = request.config.option.token
    api = pytator.Tator(url, token, project)
    _, response = api.LocalizationType.new({
        'name': 'box_type',
        'description': 'Test box type',
        'project': project,
        'media_types': [video_type],
        'dtype': 'box',
        'attribute_types': make_attribute_types(),
    })
    type_id = response['id']
    yield type_id
    # Teardown: remove the type created above.
    api.LocalizationType.delete(type_id)
Пример #14
0
def test_version_crud(url, token, project):
    """Exercise create, patch, get, and delete on the Version endpoint."""
    api = pytator.Tator(url, token, project)

    # Create.
    code, resp = api.Version.new({
        'name': 'Test Version',
        'description': 'A version for testing',
    })
    pk = resp['id']
    assert code == 201

    # Patch the name.
    code, resp = api.Version.update(pk, {'name': 'Updated Version'})
    assert code == 200

    # Read back and confirm the patch took effect.
    assert api.Version.get(pk)['name'] == 'Updated Version'

    # Delete.
    assert api.Version.delete(pk) == 200
Пример #15
0
""" Example invocation 
./sampleFrame.py --url https://debug.tatorapp.com/rest --token <TOKEN> --project 1 --media 1 --frameInterval 5 --stateType 11

"""

if __name__=="__main__":
    parser=argparse.ArgumentParser()
    parser.add_argument("--url", required=True)
    parser.add_argument("--project", required=True)
    parser.add_argument("--token", required=True)
    parser.add_argument("--media", required=True)
    parser.add_argument("--frameInterval", required=True,type=int)
    parser.add_argument("--stateType", required=True)
    args = parser.parse_args()

    tator = pytator.Tator(args.url.rstrip('/'), args.token, args.project)
    media = tator.Media.byId(args.media)
    print(f"Processing '{media['name']}'")
    existing = tator.State.filter({"media_id": args.media})
    if existing:
        print("Skipping file, due to existing states")
        sys.exit(0)
    
    for frame in range(media['num_frames']):
        if frame % args.frameInterval == 0:
            print(f"Frame {frame}: Adding frame sample type")
            obj = {"media_ids": args.media,
                   "frame": frame,
                   "type" : args.stateType
              }
            tator.State.new(obj)
Пример #16
0
def test_localization_crud(url, token, project, video_type, video, box_type):
    """Exercise create/read/update/delete for localizations, singly and in bulk."""
    api = pytator.Tator(url, token, project)
    media = api.Media.get(pk=video)

    # Fields expected to differ between a patch payload and the stored object.
    exclude = ['project', 'type', 'media_id', 'id', 'meta', 'user']

    # Bulk create a random-sized batch of boxes.
    count = random.randint(2000, 10000)
    code, resp = api.Localization.new([
        random_localization(project, box_type, media, post=True)
        for _ in range(count)
    ])
    print(f"New localization response: {resp}")
    assert code == 201

    # Create one more box individually.
    single = random_localization(project, box_type, media, post=True)
    code, resp = api.Localization.new([single])
    box_id = resp['id'][0]
    assert code == 201

    # Patch that single box.
    patch = random_localization(project, box_type, media)
    code, resp = api.Localization.update(box_id, patch)
    print(f"Patch localization response: {resp}")
    assert code == 200

    # Read it back and compare against the patch payload.
    assert_close_enough(patch, api.Localization.get(box_id), exclude)

    # Delete the single box.
    assert api.Localization.delete(box_id) == 200

    # ES can be slow at indexing so wait for a bit.
    time.sleep(5)

    # Bulk-update attributes on everything that remains.
    template = random_localization(project, box_type, media)
    bulk_patch = {'attributes': template['attributes']}
    params = {'media_id': video, 'type': box_type}
    code, resp = api.Localization.bulk_update(params, bulk_patch)
    assert code == 200

    # Filter and dataframe views must agree, and each box must match.
    remaining = api.Localization.filter(params)
    frame = api.Localization.dataframe(params)
    assert len(remaining) == len(frame)
    for box in remaining:
        assert_close_enough(bulk_patch, box, exclude)

    # Bulk delete, then confirm nothing is left.
    assert api.Localization.bulk_delete(params) == 200
    time.sleep(1)
    assert api.Localization.filter(params) == []
Пример #17
0
    # Create an arg parser, collecting required arguments for tator
    # as well as ones specific to this script.
    parser = argparse.ArgumentParser(
        description="Query localizations on location")
    # cli_parser adds the shared --url/--token/--project options.
    pytator.tator.cli_parser(parser)
    parser.add_argument("radius", help="In km")
    parser.add_argument("latitude", help="Degrees")
    parser.add_argument("longitude", help="Degrees")
    parser.add_argument("--locationKeyname", default="Deployment Location")
    # NOTE(review): default media type id 15 is deployment-specific;
    # confirm it matches the target server.
    parser.add_argument("--mediaTypeId", default=15)
    parser.add_argument("--species")
    args = parser.parse_args()

    # Initialize the tator object
    tator = pytator.Tator(args.url, args.token, args.project)

    # Execute query to fetch media
    # Example:
    # https://cvision.tatorapp.com/rest/EntityMedias/<proj_id>?attribute_distance=Deployment%20Location::20::-9.68::47.48&type=<media_type_id>
    # Query format: keyname::radius_km::longitude::latitude
    distance_query = "::".join(
        [args.locationKeyname, args.radius, args.longitude, args.latitude])
    print(distance_query)
    # Raw URL: https://cvision.tatorapp.com/rest/EntityMedias/args.project?attribute_distance={args.locationKeyName}::args.radius::args.longitude::args.latitude&type=args.mediaTypeId
    media_in_radius = tator.Media.filter({
        "attribute_distance": distance_query,
        "type": args.mediaTypeId
    })
    print(f"Found {len(media_in_radius)} media elements in radius")

    # Iterate over each media element and output the thumbnail to the
Пример #18
0
def ingestMedia(args):
    """Upload a single media file or a directory tree of media to tator.

    Args:
        args: Remaining command-line arguments; parsed here. Either
            ``--directory`` (recursive, throttled by ``--parallel``
            concurrent transcodes) or ``--file`` must be given, not both.

    Exits the process with -1 on argument errors.
    """
    parser = argparse.ArgumentParser(sys.argv[1], description=__doc__)
    parser.add_argument("-d", "--directory", help="Path to input directory")
    parser.add_argument("-f", "--file", help="Path to input file")
    parser.add_argument("--typeId",
                        help="Type ID of the media to import",
                        type=int,
                        required=True)
    parser.add_argument("--project", help="Project ID", required=True)
    parser.add_argument("--url", required=True, help="Server url")
    parser.add_argument("--token", required=True, help="Token for access")
    parser.add_argument("--extension",
                        default="mp4",
                        help="video file extension")
    parser.add_argument("--section", help="Section to apply to uploaded media")
    parser.add_argument("--parallel",
                        type=int,
                        default=4,
                        help="Number of workers use for uploads")

    args = parser.parse_args(args)

    if args.directory and args.file:
        print("ERROR: Can't supply both directory and file inputs")
        parser.print_help()
        sys.exit(-1)
    if args.directory is None and args.file is None:
        print("ERROR: Must supply either directory of file inputs")
        parser.print_help()
        sys.exit(-1)

    tator = pytator.Tator(args.url.rstrip('/'), args.token, args.project)
    medias = tator.Media

    def importFile(filepath, showProgress):
        # Upload with a precomputed md5 so the transcode can be polled
        # for completion via byMd5 later.
        md5 = pytator.md5sum.md5_sum(filepath)
        medias.uploadFile(args.typeId,
                          filepath,
                          False,
                          showProgress,
                          md5=md5,
                          section=args.section)
        return md5

    def dropFinished(in_process):
        # Remove md5s whose transcode has finished (media now queryable).
        # Bug fix: iterate over a copy — the original removed elements
        # from the list it was iterating, which skips entries.
        for md5 in list(in_process):
            if medias.byMd5(md5):
                in_process.remove(md5)

    if args.directory:
        # Collect every file under the tree with the requested extension.
        filesToProcess = []
        for root, subdirs, files in os.walk(args.directory):
            for fname in files:
                comps = os.path.splitext(fname)
                if len(comps) > 1:
                    if comps[1][1:] == args.extension:
                        filesToProcess.append(os.path.join(root, fname))

        progressbar.streams.wrap_stderr()
        bar = progressbar.ProgressBar(prefix='Files',
                                      redirect_stdout=True,
                                      redirect_stderr=True)

        in_process = []
        for filepath in bar(filesToProcess):
            # Delete in process elements first
            dropFinished(in_process)
            # Throttle: block until fewer than --parallel transcodes run.
            while len(in_process) >= args.parallel:
                dropFinished(in_process)
                print("Waiting for transcodes...")
                print(f"In process = {in_process}")
                time.sleep(2.5)

            # Bug fix: filesToProcess entries are already full paths
            # (os.walk's root includes args.directory); the original
            # re-joined args.directory onto them, duplicating the prefix.
            md5 = importFile(filepath, False)
            in_process.append(md5)
    else:
        importFile(args.file, True)
Пример #19
0
def ingestTracks(args):
    """Upload tracks from a json file, associating server-side localizations.

    For each track in the input file's ``tracks`` list, attribute names are
    remapped/ignored per --map/--ignore, matching localizations are found
    either from a --localizationIds file or by searching on --trackField,
    and the track is created via the Track endpoint.

    Args:
        args: Remaining command-line arguments; parsed here.

    Exits the process with -1 if the media or any track's localizations
    cannot be found.
    """
    parser = argparse.ArgumentParser(sys.argv[1], description=__doc__)
    parser.add_argument("-i",
                        "--input",
                        help="Path to input file",
                        required=True)
    parser.add_argument("--mediaName", help="Override media name")
    parser.add_argument("--mediaId", help="Override media name (using id)")
    parser.add_argument("--url", required=True, help="Server url")
    parser.add_argument("--token", required=True, help="Token for access")
    parser.add_argument("--project", required=True, help="Project ID")
    parser.add_argument("--trackField",
                        help="Field to use for track association(after map)")

    parser.add_argument("--trackTypeId", help="typeId of the TrackType")

    parser.add_argument("--localizationTypeId",
                        required=True,
                        help="Type of the localization to query")

    parser.add_argument(
        "--map",
        nargs="*",
        help="Map an old attribute to a new attribute (old:new)")
    parser.add_argument("--ignore",
                        nargs="*",
                        help="Ignore an attribute from the json file.")
    parser.add_argument("--trackId", help="Only upload a specific track ID.")
    parser.add_argument(
        "--localizationIds",
        help="Path to file containing localization IDs for this track.")
    parser.add_argument("--version",
                        help="If given, save tracks with this version ID.")

    args = parser.parse_args(args)
    tator = pytator.Tator(args.url.rstrip('/'), args.token, args.project)
    mapped = {}
    tracksAPI = tator.Track
    localizations = tator.Localization
    ignored = []

    # Resolve the target media: explicit name, explicit id, or guess
    # "<input basename>.mp4".
    element = None
    guess = None
    if args.mediaName:
        element = tator.Media.byName(args.mediaName)
        guess = args.mediaName
    elif args.mediaId:
        element = tator.Media.byId(args.mediaId)
        guess = f"(id: {args.mediaId})"
    else:
        base = os.path.basename(args.input)
        mp4Guess = os.path.splitext(base)[0] + '.mp4'
        print("INFO: Trying mp4 extension...{}".format(mp4Guess))
        element = tator.Media.byName(mp4Guess)
        guess = mp4Guess

    if element is None:
        print(f"Could not find media {guess}, try using '--mediaName'")
        sys.exit(-1)

    mediaId = element["id"]

    # Build the old->new attribute rename map and the ignore list.
    if args.map:
        for mapArg in args.map:
            kv = mapArg.split(':')
            mapped[kv[0]] = kv[1]
    if args.ignore:
        ignored.extend(args.ignore)

    with open(args.input, 'r') as data:
        obj = json.load(data)
        tracks = obj["tracks"]
        count = 0
        if args.trackId:
            tracks = [track for track in tracks if track["id"] == args.trackId]
        # Base filter for localization lookups; narrowed by version if given.
        filt = {
            "type": args.localizationTypeId,
            "media_id": mediaId,
        }
        if args.version:
            filt = {**filt, "version": args.version}
        for track in tracks:
            # 0.) Transform the json object to match what the
            # server wants
            for k, v in mapped.items():
                track[v] = track[k]
                del track[k]

            for k in ignored:
                if k in track:
                    del track[k]

            if args.localizationIds:
                # Pre-supplied localization IDs apply to the single media.
                with open(args.localizationIds, "r") as idFile:
                    localizationIds = json.load(idFile)
                mediaIds = [mediaId]
            else:
                localizationIds = []
                mediaIds = set()
                # NOTE(review): trackIds and queryString below are never
                # used afterward — candidates for removal.
                trackIds = set()
                #1.) Get all the localizations for this track id
                queryString = f"{args.trackField}::{track[args.trackField]}"
                localizationsInTrack = tator.Localization.filter({
                    **filt,
                    "search":
                    f"{args.trackField}:{track[args.trackField]}",
                })
                for localization in localizationsInTrack:
                    localizationIds.append(localization["id"])
                    mediaIds.add(localization['media'])

            # Convert stringly-typed fields before upload.
            track = useRealTypes(track)
            if len(mediaIds):
                tracksAPI.add(args.trackTypeId, list(mediaIds), track,
                              localizationIds, args.version)
            else:
                print("ERROR: Can't find localizations for {}".format(
                    track[args.trackField]))
                sys.exit(-1)
            # NOTE(review): this sets 'version' after tracksAPI.add has
            # already been called with args.version — confirm intent.
            if args.version:
                track['version'] = args.version
            count = count + 1
            print(f"Track {count}/{len(tracks)}", end="\r")

    print("")
Пример #20
0
def _ingestLocalizationsFromFile(args):
    """Upload localizations from a json file onto a media element.

    Resolves the target media by --mediaName/--mediaId (or guesses
    "<input basename>.mp4"), applies the requested attribute mapping,
    ignoring, and geometric transforms (shift/scale/frame-rate scale/
    normalization) to each detection, then POSTs the detections in
    blocks of 1000.

    Args:
        args: Parsed argument namespace (input, media selectors, map/
            ignore lists, transform options, version, append, saveIds).

    Returns:
        -1 if the media cannot be found; otherwise None. Exits the
        process with -1 on an unknown type ID or unmapped reserved word.
    """
    tator = pytator.Tator(args.url.rstrip('/'), args.token, args.project)
    medias = tator.Media
    element = None
    guess = None
    if args.mediaName:
        element = medias.byName(args.mediaName)
        guess = args.mediaName
    elif args.mediaId:
        element = medias.byId(args.mediaId)
        guess = f"(id: {args.mediaId})"
    else:
        # Fall back to "<input basename>.mp4" as the media name.
        base = os.path.basename(args.input)
        mp4Guess = os.path.splitext(base)[0] + '.mp4'
        print("INFO: Trying mp4 extension...{}".format(mp4Guess))
        element = medias.byName(mp4Guess)
        guess = mp4Guess

    if element is None:
        print(f"Could not find media {guess}, try using '--mediaName'")
        return -1

    mediaId = element["id"]

    # 'id' collides with the server-side primary key, so it must be
    # mapped or ignored explicitly by the caller.
    reservedWords = ['id']
    mapped = {}
    ignored = []

    if args.map:
        for mapArg in args.map:
            kv = mapArg.split(':')
            mapped[kv[0]] = kv[1]

    if args.ignore:
        ignored.extend(args.ignore)

    types = tator.LocalizationType
    typeElement = types.byTypeId(args.localizationTypeId)
    if typeElement is None:
        print(f"Unknown Localization Type ID ({args.localizationTypeId})")
        sys.exit(-1)

    print(
        f"Applying localizations of type '{typeElement['type']['name']}'(id={args.localizationTypeId}) to media='{element['name']}' (id={mediaId})"
    )

    localizations = tator.Localization

    with open(args.input, 'r') as data:
        obj = json.load(data)
        detections = obj["detections"]

        count = 0
        dimsToScale = ["h", "w", "x", "y"]

        if args.trackId:
            detections = [
                det for det in detections if det["id"] == args.trackId
            ]

        for idx, detection in enumerate(detections):
            count = count + 1
            if (count % 100 == 0):
                print(f"Processed {count} localizations")

            if args.shiftY:
                new = float(detection["y"]) + args.shiftY
                detection["y"] = str(new)

            if args.scale:
                for dim in dimsToScale:
                    detection[dim] = args.scale * float(detection[dim])
                    detection[dim] = int(round(detection[dim]))

            if args.frameRateScale:
                detection['frame'] = int(
                    round(args.frameRateScale * float(detection['frame'])))

            # By default we normalize, not no == true
            if not args.no_normalize:
                widths = ['x', 'x0', 'x1', 'w']
                heights = ['y', 'y0', 'y1', 'h']
                # Convert to floats first
                for dim in widths + heights:
                    if dim in detection:
                        if type(detection[dim]) == str:
                            detection[dim] = float(detection[dim])
                for width in widths:
                    if width in detection:
                        detection[width] = detection[width] / element['width']
                for height in heights:
                    if height in detection:
                        detection[
                            height] = detection[height] / element['height']

            for k in ignored:
                if k in detection:
                    del detection[k]

            for k in reservedWords:
                if k in detection and k not in mapped:
                    print(
                        f"found reserved word '{k}', needs '--map' or '--ignore'"
                    )
                    sys.exit(-1)

            for k, v in mapped.items():
                detection[v] = detection[k]
                del detection[k]

            detection['media_id'] = mediaId
            detection['type'] = args.localizationTypeId

            if args.version:
                detection['version'] = args.version

            # Bug fix: store the converted object back into the list.
            # The original rebound only the loop-local name, so the
            # result of useRealTypes() was discarded before upload.
            detections[idx] = useRealTypes(detection)

        existing = localizations.filter({
            "media_id": mediaId,
            "type": args.localizationTypeId
        })
        if existing and not args.append:
            print(f"Not in append-mode Skipping {element['name']}")
            return

        # Block up the transport because django drops large POSTs
        blocks = math.ceil(len(detections) / 1000)
        dbIds = []
        for block in range(blocks):
            startIdx = (1000 * block)
            endIdx = (1000 * (block + 1))
            code, msg = localizations.addMany(detections[startIdx:endIdx])
            dbIds += msg['id']
            print(f"{code} : {msg}")
        if args.saveIds:
            with open(args.saveIds, "w") as idFile:
                json.dump(dbIds, idFile)
Пример #21
0
import math

if __name__ == '__main__':
    media_ids = os.getenv('TATOR_MEDIA_IDS')
    print(f"processing = {media_ids})")
    media_ids = [int(m) for m in media_ids.split(',')]
    rest_svc = os.getenv('TATOR_API_SERVICE')
    work_dir = os.getenv('TATOR_WORK_DIR')
    token=os.getenv('TATOR_AUTH_TOKEN')
    project_id=os.getenv('TATOR_PROJECT_ID')
    pipeline_args_str = os.getenv('TATOR_PIPELINE_ARGS')
    if pipeline_args_str:
        pipeline_args = json.loads(pipeline_args_str)
    else:
        pipeline_args = {}
    tator=pytator.Tator(rest_svc, token, project_id)
    
    work_filepath=os.path.join(work_dir, "work.csv")
    try:
        os.remove(work_filepath)
    except:
        pass

    # Download the network coefficients
    # Image stores coeffients in "/network" folder
    client=docker.from_env()
    image=client.images.pull(pipeline_args['data_image'])
    container=client.containers.create(pipeline_args['data_image'])
    bits, stats = container.get_archive("/network")
    network_tar = os.path.join(work_dir, "network.tar") 
    with open(network_tar, 'wb') as tar_file: