def cleanup_index(request, es, index_name):
    def fin():
        try:
            es.indices.delete(index_name)
        except NotFoundError:
            pass
    request.addfinalizer(fin)
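A minimal usage sketch (assuming `es` and `index_name` fixtures are provided elsewhere, e.g. in conftest.py): the test creates the index and relies on the registered finalizer to remove it afterwards, with NotFoundError swallowed if it is already gone.

def test_create_index_without_leftovers(es, index_name, cleanup_index):
    # The finalizer registered by cleanup_index deletes the index after the test.
    es.indices.create(index=index_name)
    assert es.indices.exists(index=index_name)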
Example #2
def file_server6(request):
    time.sleep(0.1)
    file_server6 = FileServer("::1", 1544)
    file_server6.ip_external = 'fca5:95d6:bfde:d902:8951:276e:1111:a22c'  # Fake external ip

    def listen():
        ConnectionServer.start(file_server6)
        ConnectionServer.listen(file_server6)

    gevent.spawn(listen)
    # Wait for port opening
    for retry in range(10):
        time.sleep(0.1)  # Port opening
        try:
            conn = file_server6.getConnection("::1", 1544)
            conn.close()
            break
        except Exception as err:
            print("FileServer6 startup error", Debug.formatException(err))
    assert file_server6.running
    file_server6.ip_incoming = {}  # Reset flood protection

    def stop():
        file_server6.stop()
    request.addfinalizer(stop)
    return file_server6
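A minimal test sketch built on this fixture; it reuses the same getConnection() call the fixture itself issues while waiting for the port to open.

def test_file_server6_accepts_connections(file_server6):
    # Open and close a loopback IPv6 connection to the running server.
    conn = file_server6.getConnection("::1", 1544)
    assert file_server6.running
    conn.close()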
def install_context(request, tmpdir, monkeypatch):
    """Fixture to set up temporary installation directory.
    """
    # Save old values so we can restore them.
    new_cwd = tmpdir.mkdir('cwd')
    user_base = tmpdir.mkdir('user_base')
    user_site = tmpdir.mkdir('user_site')
    install_dir = tmpdir.mkdir('install_dir')

    def fin():
        # undo the monkeypatch, particularly needed under
        # windows because of kept handle on cwd
        monkeypatch.undo()
        new_cwd.remove()
        user_base.remove()
        user_site.remove()
        install_dir.remove()

    request.addfinalizer(fin)

    # Change the environment and site settings to control where the
    # files are installed and ensure we do not overwrite anything.
    monkeypatch.chdir(new_cwd)
    monkeypatch.setattr(easy_install_pkg, '__file__', user_site.strpath)
    monkeypatch.setattr('site.USER_BASE', user_base.strpath)
    monkeypatch.setattr('site.USER_SITE', user_site.strpath)
    monkeypatch.setattr('sys.path', sys.path + [install_dir.strpath])
    monkeypatch.setenv(str('PYTHONPATH'), str(os.path.pathsep.join(sys.path)))

    # Set up the command for performing the installation.
    dist = Distribution()
    cmd = easy_install(dist)
    cmd.install_dir = install_dir.strpath
    return cmd
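A minimal sketch of what a test receives from this fixture: a configured easy_install command wired to the temporary tree. It only checks the wiring set up above, not an actual installation.

import os
import sys

def test_install_context_wiring(install_context):
    cmd = install_context
    # install_dir was created by tmpdir.mkdir() and appended to the patched sys.path.
    assert os.path.isdir(cmd.install_dir)
    assert cmd.install_dir in sys.path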
def minikube(request, worker_id):
    def teardown():
        if not k8s_skip_teardown:
            try:
                print("Removing minikube container ...")
                mk.container.remove(force=True, v=True)
            except:
                pass

    request.addfinalizer(teardown)
    k8s_version = "v" + request.param.lstrip("v")
    k8s_timeout = int(request.config.getoption("--k8s-timeout"))
    k8s_container = request.config.getoption("--k8s-container")
    k8s_skip_teardown = request.config.getoption("--k8s-skip-teardown")
    mk = Minikube()
    mk.worker_id = worker_id
    if k8s_container:
        mk.connect(k8s_container, k8s_timeout)
        k8s_skip_teardown = True
    elif worker_id == "master" or worker_id == "gw0":
        mk.deploy(k8s_version, k8s_timeout)
        if worker_id == "gw0":
            k8s_skip_teardown = True
    else:
        mk.connect("minikube", k8s_timeout, version=k8s_version)
        k8s_skip_teardown = True
    return mk
Example #6
def stop(request):
    def fin():
        # `fixture` is a module-level object expected to be created elsewhere in the test module
        if fixture:
            fixture.destroy()

    request.addfinalizer(fin)
    return fixture
Example #7
def site(request):
    threads_before = [obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet)]
    # Reset ratelimit
    RateLimit.queue_db = {}
    RateLimit.called_db = {}

    site = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")

    # Always use original data
    assert "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT" in site.storage.getPath("")  # Make sure we dont delete everything
    shutil.rmtree(site.storage.getPath(""), True)
    shutil.copytree(site.storage.getPath("") + "-original", site.storage.getPath(""))

    # Add to site manager
    SiteManager.site_manager.get("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
    site.announce = mock.MagicMock(return_value=True)  # Don't try to find peers from the net

    def cleanup():
        site.delete()
        site.content_manager.contents.db.close("Test cleanup")
        site.content_manager.contents.db.timer_check_optional.kill()
        SiteManager.site_manager.sites.clear()
        db_path = "%s/content.db" % config.data_dir
        os.unlink(db_path)
        del ContentDb.content_dbs[db_path]
        gevent.killall([obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet) and obj not in threads_before])
    request.addfinalizer(cleanup)

    site.greenlet_manager.stopGreenlets()
    site = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")  # Create new Site object to load content.json files
    if not SiteManager.site_manager.sites:
        SiteManager.site_manager.sites = {}
    SiteManager.site_manager.sites["1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"] = site
    site.settings["serving"] = True
    return site
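A minimal smoke-test sketch against the returned Site; it only asserts values the fixture itself sets (address, serving flag, and registration in the site manager).

def test_site_fixture(site):
    assert site.address == "1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"
    assert site.settings["serving"]
    assert SiteManager.site_manager.sites[site.address] is site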
Example #8
def site_temp(request):
    threads_before = [
        obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet)
    ]
    with mock.patch("Config.config.data_dir", config.data_dir + "-temp"):
        site_temp = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
        site_temp.settings["serving"] = True
        site_temp.announce = mock.MagicMock(
            return_value=True)  # Don't try to find peers from the net

    def cleanup():
        site_temp.delete()
        site_temp.content_manager.contents.db.close("Test cleanup")
        site_temp.content_manager.contents.db.timer_check_optional.kill()
        db_path = "%s-temp/content.db" % config.data_dir
        os.unlink(db_path)
        del ContentDb.content_dbs[db_path]
        gevent.killall([
            obj for obj in gc.get_objects()
            if isinstance(obj, gevent.Greenlet) and obj not in threads_before
        ])

    request.addfinalizer(cleanup)
    site_temp.log = logging.getLogger("Temp:%s" % site_temp.address_short)
    return site_temp
Example #9
def minikube(request, worker_id):
    def teardown():
        if not k8s_skip_teardown:
            try:
                print("Removing minikube container ...")
                inst.container.remove(force=True, v=True)
            except:  # noqa pylint: disable=bare-except
                pass

    request.addfinalizer(teardown)
    k8s_version = "v" + request.param.lstrip("v")
    k8s_timeout = int(request.config.getoption("--k8s-timeout"))
    k8s_container = request.config.getoption("--k8s-container")
    k8s_skip_teardown = request.config.getoption("--k8s-skip-teardown")
    inst = Minikube()
    inst.worker_id = worker_id
    if k8s_container:
        inst.connect(k8s_container, k8s_timeout)
        k8s_skip_teardown = True
    elif worker_id in ("master", "gw0"):
        inst.deploy(k8s_version, k8s_timeout)
        if worker_id == "gw0":
            k8s_skip_teardown = True
    else:
        inst.connect(K8S_DEFAULT_CONTAINER_NAME,
                     k8s_timeout,
                     version=k8s_version)
        k8s_skip_teardown = True
    return inst
Example #10
def file_server4(request):
    time.sleep(0.1)
    file_server = FileServer("127.0.0.1", 1544)
    file_server.ip_external = "1.2.3.4"  # Fake external ip

    def listen():
        ConnectionServer.start(file_server)
        ConnectionServer.listen(file_server)

    gevent.spawn(listen)
    # Wait for port opening
    for retry in range(10):
        time.sleep(0.1)  # Port opening
        try:
            conn = file_server.getConnection("127.0.0.1", 1544)
            conn.close()
            break
        except Exception as err:
            print("FileServer6 startup error", Debug.formatException(err))
    assert file_server.running
    file_server.ip_incoming = {}  # Reset flood protection

    def stop():
        file_server.stop()
    request.addfinalizer(stop)
    return file_server
Example #11
def before_tests(request):
    # Download files for local use
    urllib.request.urlretrieve("https://software.broadinstitute.org/cancer/software/genepattern/data/all_aml/all_aml_test.gct", "all_aml_test.gct")
    urllib.request.urlretrieve("https://software.broadinstitute.org/cancer/software/genepattern/data/protocols/all_aml_test.preprocessed.comp.marker.odf",
                               "all_aml_test.preprocessed.comp.marker.odf")

    # Clean up after ourselves (`after_tests` is defined elsewhere in the test module)
    request.addfinalizer(after_tests)
Example #12
def httpd(request):
    class RequestHandler(http.server.SimpleHTTPRequestHandler):
        def do_POST(self):
            logging.info('\n%s\n%s', self.requestline, self.headers)
            self.do_GET()

        def do_GET(self):
            logging.info('\n%s\n%s', self.requestline, self.headers)
            if self.path == '/site5/redirect/':
                self.send_response(303, 'See other')
                self.send_header('Connection', 'close')
                self.send_header('Content-Length', 0)
                self.send_header('Location', '/site5/destination/')
                self.end_headers()
                self.wfile.write(b'')
            elif self.path == '/site9/redirect.html':
                self.send_response(303, 'See other')
                self.send_header('Connection', 'close')
                self.send_header('Content-Length', 0)
                self.send_header('Location', '/site9/destination.html')
                self.end_headers()
                self.wfile.write(b'')
            elif self.path.startswith('/infinite/'):
                payload = b'''
<html>
 <head>
  <title>infinite site</title>
 </head>
 <body>
  <a href='a/'>a/</a> <a href='b/'>b/</a> <a href='c/'>c/</a>
  <a href='d/'>d/</a> <a href='e/'>e/</a> <a href='f/'>f/</a>
  <a href='g/'>g/</a> <a href='h/'>h/</a> <a href='i/'>i/</a>
 </body>
</html>
'''
                self.send_response(200, 'OK')
                self.send_header('Connection', 'close')
                self.send_header('Content-Length', len(payload))
                self.end_headers()
                self.wfile.write(payload)
            else:
                super().do_GET()

    # SimpleHTTPRequestHandler always uses CWD so we have to chdir
    os.chdir(os.path.join(os.path.dirname(__file__), 'htdocs'))

    # `local_address` is a module-level constant (e.g. 'localhost') defined elsewhere
    httpd = http.server.HTTPServer((local_address, 0), RequestHandler)
    httpd_thread = threading.Thread(name='httpd', target=httpd.serve_forever)
    httpd_thread.start()

    def fin():
        httpd.shutdown()
        httpd.server_close()
        httpd_thread.join()

    request.addfinalizer(fin)

    return httpd
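A minimal sketch exercising the redirect route this handler serves; http.client is from the standard library and the ephemeral port comes from httpd.server_address.

import http.client

def test_site5_redirect(httpd):
    host, port = httpd.server_address
    conn = http.client.HTTPConnection(host, port)
    conn.request('GET', '/site5/redirect/')
    response = conn.getresponse()
    assert response.status == 303
    assert response.getheader('Location') == '/site5/destination/'
    conn.close()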
Example #13
def client(request):
    """
    Creates client fixture which can be used to access Flask API
    @returns: instance of testable flask app
    """
    test_client = app.test_client()

    def teardown():
        pass

    request.addfinalizer(teardown)
    return test_client
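A minimal sketch of using the returned test client; the route below is hypothetical, so substitute one the Flask app actually defines.

def test_client_smoke(client):
    response = client.get('/')  # hypothetical endpoint
    assert response.status_code in (200, 404)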
Example #14
def teardown_test_data_dir(request):
    """
    TEARDOWN:
    ---------
        1) Remove the folder (+ all contents) at the end of the test suite.
    """

    # Teardown
    def remove_test_dir_and_db():
        shutil.rmtree(TEST_DATA_PATH)

    request.addfinalizer(remove_test_dir_and_db)
Example #15
def httpd(request):
    class RequestHandler(http.server.SimpleHTTPRequestHandler):
        def do_GET(self):
            if self.path == '/site5/redirect/':
                self.send_response(303, 'See other')
                self.send_header('Connection', 'close')
                self.send_header('Content-Length', 0)
                self.send_header('Location', '/site5/destination/')
                self.end_headers()
                self.wfile.write(b'')
            elif self.path == '/site9/redirect.html':
                self.send_response(303, 'See other')
                self.send_header('Connection', 'close')
                self.send_header('Content-Length', 0)
                self.send_header('Location', '/site9/destination.html')
                self.end_headers()
                self.wfile.write(b'')
            elif self.path.startswith('/infinite/'):
                payload = b'''
<html>
 <head>
  <title>infinite site</title>
 </head>
 <body>
  <a href='a/'>a/</a> <a href='b/'>b/</a> <a href='c/'>c/</a>
  <a href='d/'>d/</a> <a href='e/'>e/</a> <a href='f/'>f/</a>
  <a href='g/'>g/</a> <a href='h/'>h/</a> <a href='i/'>i/</a>
 </body>
</html>
'''
                self.send_response(200, 'OK')
                self.send_header('Connection', 'close')
                self.send_header('Content-Length', len(payload))
                self.end_headers()
                self.wfile.write(payload)
            else:
                super().do_GET()

    # SimpleHTTPRequestHandler always uses CWD so we have to chdir
    os.chdir(os.path.join(os.path.dirname(__file__), 'htdocs'))

    httpd = http.server.HTTPServer(('localhost', 0), RequestHandler)
    httpd_thread = threading.Thread(name='httpd', target=httpd.serve_forever)
    httpd_thread.start()

    def fin():
        httpd.shutdown()
        httpd.server_close()
        httpd_thread.join()
    request.addfinalizer(fin)

    return httpd
Example #16
async def principal(request, event_loop, caldav):
    """principal async fixture."""
    principal = await caldav.principal()

    def finalize():
        async def afin():
            await principal.prune()

        event_loop.run_until_complete(afin())

    request.addfinalizer(finalize)

    return principal
Example #17
def wms_server(request):
    """
    Run the WMS server for the duration of these tests
    """
    external_url = os.environ.get("SERVER_URL")
    if external_url:
        server = generic_obj()
        server.url = external_url
    else:
        server = WSGIServer(application=wms.app)
        server.start()
        request.addfinalizer(server.stop)
    return server
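A minimal sketch of querying the running server; it assumes the `requests` package and that the app answers a standard WMS GetCapabilities request at its base URL.

import requests

def test_wms_get_capabilities(wms_server):
    response = requests.get(wms_server.url, params={
        "service": "WMS", "request": "GetCapabilities", "version": "1.3.0"})
    assert response.status_code == 200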
def block_remote_conections(request):
    """
    Block all remote calls during unit tests.
    """

    # inspired by https://stackoverflow.com/q/18601828

    def block_socket_side_effect(*args, **kwargs):
        raise ValueError(_BLOCKED_CONNECTION_MESSAGE)

    block_socket_patcher = mock.patch('socket.socket',
                                      side_effect=block_socket_side_effect)
    block_socket_patcher.start()

    request.addfinalizer(block_socket_patcher.stop)
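A minimal sketch showing the effect of the patch: while the fixture is active, any attempt to open a socket raises the blocked-connection error.

import socket
import pytest

def test_sockets_are_blocked(block_remote_conections):
    with pytest.raises(ValueError):
        socket.socket()  # the patched socket.socket raises via its side_effect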
Example #19
def httpd(request):
    # SimpleHTTPRequestHandler always uses CWD so we have to chdir
    os.chdir(os.path.join(os.path.dirname(__file__), 'htdocs'))

    httpd = http.server.HTTPServer(
            ('localhost', 0), http.server.SimpleHTTPRequestHandler)
    httpd_thread = threading.Thread(name='httpd', target=httpd.serve_forever)
    httpd_thread.start()

    def fin():
        httpd.shutdown()
        httpd.server_close()
        httpd_thread.join()
    request.addfinalizer(fin)

    return httpd
Example #20
def browser(request):
    try:
        from selenium import webdriver
        print("Starting chromedriver...")
        options = webdriver.chrome.options.Options()
        options.add_argument("--headless")
        options.add_argument("--window-size=1920x1080")
        options.add_argument("--log-level=1")
        browser = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH, service_log_path=os.path.devnull, options=options)

        def quit():
            browser.quit()
        request.addfinalizer(quit)
    except Exception as err:
        raise pytest.skip("Test requires selenium + chromedriver: %s" % err)
    return browser
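A minimal smoke-test sketch for the headless Chrome fixture; the data: URL is a stand-in, and a real test would navigate to the application under test.

def test_browser_smoke(browser):
    browser.get("data:text/html,<title>fixture-smoke</title>")
    assert "fixture-smoke" in browser.title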
Example #22
def db(request):
    db_path = "%s/zeronet.db" % config.data_dir
    schema = {
        "db_name": "TestDb",
        "db_file": "%s/zeronet.db" % config.data_dir,
        "maps": {
            "data.json": {
                "to_table": [
                    "test", {
                        "node": "test",
                        "table": "test_importfilter",
                        "import_cols": ["test_id", "title"]
                    }
                ]
            }
        },
        "tables": {
            "test": {
                "cols": [["test_id", "INTEGER"], ["title", "TEXT"],
                         ["json_id", "INTEGER REFERENCES json (json_id)"]],
                "indexes": ["CREATE UNIQUE INDEX test_id ON test(test_id)"],
                "schema_changed":
                1426195822
            },
            "test_importfilter": {
                "cols": [["test_id", "INTEGER"], ["title", "TEXT"],
                         ["json_id", "INTEGER REFERENCES json (json_id)"]],
                "indexes": [
                    "CREATE UNIQUE INDEX test_importfilter_id ON test_importfilter(test_id)"
                ],
                "schema_changed":
                1426195822
            }
        }
    }

    if os.path.isfile(db_path):
        os.unlink(db_path)
    db = Db.Db(schema, db_path)
    db.checkTables()

    def stop():
        db.close("Test db cleanup")
        os.unlink(db_path)

    request.addfinalizer(stop)
    return db
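A minimal sketch of inspecting the schema the fixture just created; it assumes Db.execute() returns a standard sqlite3 cursor, so sqlite_master introspection works.

def test_db_tables_exist(db):
    cursor = db.execute("SELECT name FROM sqlite_master WHERE type='table'")
    tables = [row[0] for row in cursor]
    assert "test" in tables
    assert "test_importfilter" in tables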
def agent_image(minikube, registry, request, worker_id):
    def teardown():
        try:
            client.images.remove("%s:%s" % (agent_image_name, agent_image_tag))
        except:
            pass

    request.addfinalizer(teardown)
    client = get_docker_client()
    final_agent_image_name = request.config.getoption("--k8s-agent-name")
    final_agent_image_tag = request.config.getoption("--k8s-agent-tag")
    agent_image_name = "localhost:%d/%s" % (
        registry['port'], final_agent_image_name.split("/")[-1])
    agent_image_tag = final_agent_image_tag
    if worker_id == "master" or worker_id == "gw0":
        if not has_docker_image(client, final_agent_image_name,
                                final_agent_image_tag):
            print("\nAgent image '%s:%s' not found in local registry." %
                  (final_agent_image_name, final_agent_image_tag))
            print("\nAttempting to pull from remote registry ...")
            final_agent_image = client.images.pull(final_agent_image_name,
                                                   tag=final_agent_image_tag)
        else:
            final_agent_image = client.images.get(final_agent_image_name +
                                                  ":" + final_agent_image_tag)
        print("\nTagging %s:%s as %s:%s ..." %
              (final_agent_image_name, final_agent_image_tag, agent_image_name,
               agent_image_tag))
        final_agent_image.tag(agent_image_name, tag=agent_image_tag)
        print("\nPushing %s:%s ..." % (agent_image_name, agent_image_tag))
        client.images.push(agent_image_name, tag=agent_image_tag)
        print("\nPulling %s:%s ..." % (agent_image_name, agent_image_tag))
        minikube.pull_agent_image(agent_image_name, agent_image_tag,
                                  final_agent_image.id)
    else:
        print(
            "\nWaiting for agent image \"%s:%s\" to be pulled to minikube ..."
            % (agent_image_name, agent_image_tag))
        assert wait_for(p(has_docker_image, minikube.client, agent_image_name, agent_image_tag), timeout_seconds=60), \
            "timed out waiting for agent image \"%s:%s\"!" % (agent_image_name, agent_image_tag)
        final_agent_image = client.images.get(final_agent_image_name + ":" +
                                              final_agent_image_tag)
    return {
        "name": agent_image_name,
        "tag": agent_image_tag,
        "id": final_agent_image.id
    }
def setup_index(request, index_name):
    es = Elasticsearch()
    try:
        es.indices.create(index=index_name)
    except RequestError as e:
        if e.error == u'index_already_exists_exception':
            es.indices.delete(index_name)
        else:
            raise

    def fin():
        try:
            es.indices.delete(index_name)
        except NotFoundError:
            pass

    request.addfinalizer(fin)
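A minimal usage sketch; like the fixture itself, it assumes an Elasticsearch instance reachable on the default local port and an `index_name` fixture from the surrounding suite.

from elasticsearch import Elasticsearch

def test_index_was_created(setup_index, index_name):
    es = Elasticsearch()
    assert es.indices.exists(index=index_name)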
def download_and_extract(request, req, target):
    cmd = [
        sys.executable,
        '-m',
        'pip',
        'download',
        '--no-deps',
        '--no-binary',
        ':all:',
        req,
    ]
    output = subprocess.check_output(cmd, encoding='utf-8')
    filename = re.search('Saved (.*)', output).group(1)
    request.addfinalizer(functools.partial(os.remove, filename))
    opener = zipfile.ZipFile if filename.endswith('.zip') else tarfile.open
    with opener(filename) as archive:
        archive.extractall(target)
    return os.path.join(target, os.listdir(target)[0])
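A minimal usage sketch; the requirement name is hypothetical (it must have an sdist available because of --no-binary :all:), and tmp_path is pytest's built-in temporary-directory fixture.

import os

def test_download_and_extract(request, tmp_path):
    src_dir = download_and_extract(request, 'example-package', str(tmp_path))  # hypothetical requirement
    assert os.path.isdir(src_dir)
    assert os.listdir(src_dir)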
Example #27
def resetTempSettings(request):
    data_dir_temp = config.data_dir + "-temp"
    if not os.path.isdir(data_dir_temp):
        os.mkdir(data_dir_temp)
    open("%s/sites.json" % data_dir_temp, "w").write("{}")
    open("%s/filters.json" % data_dir_temp, "w").write("{}")
    open("%s/users.json" % data_dir_temp, "w").write("""
        {
            "15E5rhcAUD69WbiYsYARh4YHJ4sLm2JEyc": {
                "certs": {},
                "master_seed": "024bceac1105483d66585d8a60eaf20aa8c3254b0f266e0d626ddb6114e2949a",
                "sites": {}
            }
        }
    """)

    def cleanup():
        os.unlink("%s/sites.json" % data_dir_temp)
        os.unlink("%s/users.json" % data_dir_temp)
        os.unlink("%s/filters.json" % data_dir_temp)
    request.addfinalizer(cleanup)
Example #28
def site_temp(request):
    threads_before = [
        obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet)
    ]
    with mock.patch("Config.config.data_dir", config.data_dir + "-temp"):
        site_temp = Site("1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT")
        site_temp.announce = mock.MagicMock(
            return_value=True)  # Don't try to find peers from the net

    def cleanup():
        site_temp.storage.deleteFiles()
        site_temp.content_manager.contents.db.deleteSite(site_temp)
        site_temp.content_manager.contents.db.close()
        time.sleep(0.01)  # Wait for db close
        db_path = "%s-temp/content.db" % config.data_dir
        os.unlink(db_path)
        del ContentDb.content_dbs[db_path]
        gevent.killall([
            obj for obj in gc.get_objects()
            if isinstance(obj, gevent.Greenlet) and obj not in threads_before
        ])

    request.addfinalizer(cleanup)
    return site_temp
Example #29
def agent_image(minikube, registry, request, worker_id):  # pylint: disable=redefined-outer-name
    def teardown():
        if temp_agent_name and temp_agent_tag:
            try:
                client.images.remove("%s:%s" %
                                     (temp_agent_name, temp_agent_tag))
            except:  # noqa pylint: disable=bare-except
                pass

    request.addfinalizer(teardown)
    sfx_agent_name = request.config.getoption("--k8s-sfx-agent")
    if sfx_agent_name:
        try:
            agent_image_name, agent_image_tag = sfx_agent_name.rsplit(
                ":", maxsplit=1)
        except ValueError:
            agent_image_name = sfx_agent_name
            agent_image_tag = "latest"
    else:
        agent_image_name = "signalfx-agent"
        agent_image_tag = "k8s-test"
    temp_agent_name = None
    temp_agent_tag = None
    client = get_docker_client()
    if worker_id in ("master", "gw0"):
        if sfx_agent_name and not has_docker_image(client, sfx_agent_name):
            print('\nAgent image "%s" not found in local registry.' %
                  sfx_agent_name)
            print("Attempting to pull from remote registry to minikube ...")
            sfx_agent_image = minikube.pull_agent_image(
                agent_image_name, agent_image_tag)
            _, output = minikube.container.exec_run("docker images")
            print(output.decode("utf-8"))
            return {
                "name": agent_image_name,
                "tag": agent_image_tag,
                "id": sfx_agent_image.id
            }

        if sfx_agent_name:
            print('\nAgent image "%s" found in local registry.' %
                  sfx_agent_name)
            sfx_agent_image = client.images.get(sfx_agent_name)
        else:
            print(
                '\nBuilding agent image from local source and tagging as "%s:%s" ...'
                % (agent_image_name, agent_image_tag))
            subprocess.run(
                "make image",
                shell=True,
                env={
                    "PULL_CACHE": "yes",
                    "AGENT_IMAGE_NAME": agent_image_name,
                    "AGENT_VERSION": agent_image_tag
                },
                stderr=subprocess.STDOUT,
                check=True,
            )
            sfx_agent_image = client.images.get(agent_image_name + ":" +
                                                agent_image_tag)
        temp_agent_name = "localhost:%d/signalfx-agent-dev" % registry["port"]
        temp_agent_tag = "latest"
        print("\nPushing agent image to minikube ...")
        sfx_agent_image.tag(temp_agent_name, tag=temp_agent_tag)
        client.images.push(temp_agent_name, tag=temp_agent_tag)
        sfx_agent_image = minikube.pull_agent_image(temp_agent_name,
                                                    temp_agent_tag,
                                                    sfx_agent_image.id)
        sfx_agent_image.tag(agent_image_name, tag=agent_image_tag)
        _, output = minikube.container.exec_run("docker images")
        print(output.decode("utf-8"))
    else:
        print("\nWaiting for agent image to be built/pulled to minikube ...")
        assert wait_for(
            p(has_docker_image, minikube.client, agent_image_name,
              agent_image_tag),
            timeout_seconds=600,
            interval_seconds=2,
        ), 'timed out waiting for agent image "%s:%s"!' % (agent_image_name,
                                                           agent_image_tag)
        sfx_agent_image = minikube.client.images.get(agent_image_name + ":" +
                                                     agent_image_tag)
    return {
        "name": agent_image_name,
        "tag": agent_image_tag,
        "id": sfx_agent_image.id
    }