def task_restart(cluster_id, id, task_id):
    """Restart one task of a connector, then return the refreshed connector list.

    The Connect REST response is intentionally ignored; the caller gets the
    current overall connector state instead.
    """
    target = (
        config.get_connect_url(cluster_id)
        + "/connectors/" + id + "/tasks/" + task_id + "/restart"
    )
    requests.post(target, timeout=REQUEST_TIMEOUT_SEC)
    return connectors()
def new(cluster_id):
    """Create a new connector from the JSON body of the current request.

    Returns the Connect REST response (status passed through), or 400 when
    the body is missing or has no 'name' property.
    """
    payload = request.get_json()
    if payload is None:
        message = config.ERROR_MSG_NO_DATA.format(
            "There was no connector configuration provided")
        return jsonify({"message": message}), 400
    if "name" not in payload:
        return jsonify({"message": "Missing configuration property 'name'."}), 400
    # The remaining keys (after removing 'name') form the connector config.
    connector_name = payload.pop("name")
    body = {"name": connector_name, "config": payload}
    response = requests.post(
        config.get_connect_url(cluster_id) + "/connectors/",
        json=body,
        timeout=REQUEST_TIMEOUT_SEC,
    )
    return jsonify(response.json()), response.status_code
def status(cluster_id, id):
    """Proxy the status of a single connector from the Connect REST API."""
    target = config.get_connect_url(cluster_id) + "/connectors/" + id + "/status"
    response = requests.get(target, timeout=REQUEST_TIMEOUT_SEC)
    return jsonify(response.json()), response.status_code
def test_load():
    """Loading an id with no cached entry yields a fresh, empty CacheEntry."""
    manager = get_cache_manager()
    entry = manager.load(0)
    assert entry is not None
    # A miss is materialized as an empty entry pointing at the cluster URL.
    assert entry.state == []
    assert entry.id == 0
    assert entry.url == config.get_connect_url(0)
def load(self, id):
    """Load the cache entry for a cluster id from Redis.

    Returns a deserialized CacheEntry on a hit; on a miss, returns a fresh
    empty entry pointing at the cluster's Connect URL (nothing is stored).
    """
    res = self._redis.get(id)
    # Fix: identity comparison with None (PEP 8 / E711), not equality.
    if res is None:
        return CacheEntry(id=id, url=config.get_connect_url(id), state=[])
    return CacheEntry.from_dict(json.loads(res))
def run(self):
    """Refresh the cached state of every configured Connect cluster.

    Reachable clusters are stored as running with their freshly loaded
    state; unreachable or timed-out clusters are stored as not running
    with a descriptive error message. Any other failure is only logged.
    """
    cache = store.CacheManager(config.get_redis())

    def record_failure(cluster_id, template):
        # Mark the cluster as down and remember why, so the UI can show it.
        message = template.format(config.get_connect_url(cluster_id))
        logger.info(message)
        cache.merge(
            store.CacheEntry(
                id=cluster_id,
                running=False,
                error_mesage=message,
            ))

    for cluster in config.get_connect_clusters():
        try:
            cluster_id = cluster["id"]
            logger.info("loading cluster state {}".format(cluster))
            cluster_state = connect.load_state(cluster_id)
            logger.info("merging cache {}".format(cluster))
            cache.merge(
                store.CacheEntry(
                    id=cluster_id,
                    state=cluster_state,
                    running=True,
                    error_mesage=None,
                    last_time_running=datetime.now(),
                ))
            logger.info("cache updated {}".format(cluster))
        except ConnectionError:
            record_failure(cluster_id, config.ERROR_MSG_CLUSTER_NOT_REACHABLE)
        except Timeout:
            record_failure(cluster_id, config.ERROR_MSG_CLUSTER_TIMEOUT)
        except Exception as exc:
            # Never let one bad cluster kill the refresh loop.
            logger.error("Could not update cache: %s", exc)
def test_merge():
    """Merging partial CacheEntry updates preserves previously stored fields."""
    cache = get_cache_manager()

    # A cold load yields an empty entry for the cluster.
    resp = cache.load(0)
    assert resp is not None
    assert resp.state == []
    assert resp.error_mesage is None
    assert resp.created is not None
    assert resp.running is None
    assert resp.last_time_running is None
    assert resp.id == 0
    assert resp.url == config.get_connect_url(0)

    # Merging state + last_time_running updates those fields only.
    now = datetime.now()
    merged = cache.merge(CacheEntry(id=0, state={"foo": "bar"}, last_time_running=now))
    assert merged.state == {"foo": "bar"}
    assert merged.error_mesage is None
    assert merged.created is not None
    assert merged.running is None
    assert merged.last_time_running == now
    assert merged.id == 0
    assert merged.url == config.get_connect_url(0)

    # The merged entry is persisted and visible on a fresh load.
    resp = cache.load(0)
    assert resp.state == {"foo": "bar"}
    assert resp.error_mesage is None
    assert resp.created is not None
    assert resp.last_time_running == now
    assert resp.id == 0
    assert resp.url == config.get_connect_url(0)

    # Merging an error keeps the earlier state intact.
    merged = cache.merge(CacheEntry(id=0, error_mesage="foo", running=True))
    assert merged.state == {"foo": "bar"}
    assert merged.error_mesage == "foo"
    # Fix: identity comparison with True (PEP 8 / E712), not equality.
    assert merged.running is True

    resp = cache.load(0)
    assert resp.state == {"foo": "bar"}
    assert resp.error_mesage == "foo"
    assert resp.running is True
    assert resp.id == 0
    assert resp.url == config.get_connect_url(0)
def merge(self, cache_entry: CacheEntry):
    """Merge a (possibly partial) CacheEntry into the Redis-backed cache.

    Fields missing from `cache_entry` are filled from the existing cached
    entry, the result is written back with the configured TTL (unless it
    is identical to what is already stored), and the merged entry is
    returned.

    Raises:
        AssertionError: if `cache_entry.id` is not set.
    """
    cache_ttl = config.get_cache_ttl()
    if cache_entry.id is None:
        raise AssertionError("cache entry id is not set!")
    if cache_entry.url is None:
        cache_entry.url = config.get_connect_url(cache_entry.id)
    redis = self._redis
    res = redis.get(cache_entry.id)
    # is there already an entry in the cache
    if res is not None:
        old_cache = CacheEntry.from_dict(json.loads(res))
        self._merge_state(new_cache=cache_entry, old_cache=old_cache)
        self._merge_last_time_running(new_cache=cache_entry, old_cache=old_cache)
        self._merge_created(new_cache=cache_entry, old_cache=old_cache)
        # only merge error message when state was not running
        # Fix: identity comparison with False (PEP 8 / E712) — `running` is
        # True/False/None here, and None must NOT trigger the error merge.
        if cache_entry.running is False:
            self._merge_error(new_cache=cache_entry, old_cache=old_cache)
        self._merge_running(new_cache=cache_entry, old_cache=old_cache)
        self._merge_url(new_cache=cache_entry, old_cache=old_cache)
        # Skip the Redis write when the merged entry is identical to the
        # stored one — avoids needlessly refreshing the TTL and log noise.
        if cache_entry != old_cache:
            redis.set(
                cache_entry.id,
                json.dumps(cache_entry.to_dict()),
                ex=cache_ttl
            )
        else:
            logger.info(
                "The cache entry for cluster state (id '{}') will not be updated, because there were no changes.".format(
                    cache_entry.id
                )
            )
    else:
        redis.set(cache_entry.id, json.dumps(cache_entry.to_dict()), ex=cache_ttl)
    return cache_entry
def validate(cluster_id, name):
    """Validate a connector configuration against a connector plugin.

    Returns 400 when the request has no JSON body; otherwise proxies the
    Connect validation response.
    """
    payload = request.get_json()
    if payload is None:
        message = config.ERROR_MSG_NO_DATA.format("connector configuration")
        return jsonify({"message": message}), 400
    target = (
        config.get_connect_url(cluster_id)
        + "/connector-plugins/" + name + "/config/validate"
    )
    response = requests.put(target, json=payload, timeout=REQUEST_TIMEOUT_SEC)
    return jsonify(response.json()), response.status_code
def update(cluster_id, id):
    """Replace the configuration of an existing connector.

    Returns 400 when the request has no JSON body; otherwise proxies the
    Connect response.
    """
    payload = request.get_json()
    if payload is None:
        message = config.ERROR_MSG_NO_DATA.format(
            "There is no connector configuration for '" + id + "'")
        return jsonify({"message": message}), 400
    target = config.get_connect_url(cluster_id) + "/connectors/" + id + "/config"
    response = requests.put(target, json=payload, timeout=REQUEST_TIMEOUT_SEC)
    return jsonify(response.json()), response.status_code
def resume(cluster_id, id):
    """Resume a paused connector, then return the refreshed connector list.

    The Connect REST response is intentionally ignored.
    """
    target = config.get_connect_url(cluster_id) + "/connectors/" + id + "/resume"
    requests.put(target, timeout=REQUEST_TIMEOUT_SEC)
    return connectors()
def delete(cluster_id, id):
    """Delete a connector, then return the refreshed connector list.

    The Connect REST response is intentionally ignored.
    """
    target = config.get_connect_url(cluster_id) + "/connectors/" + id
    requests.delete(target, timeout=REQUEST_TIMEOUT_SEC)
    return connectors()