def connect(self):
    """Open a memcached connection to the host described by self.spec."""
    fallback_port = int(getattr(self.opts, "port", 11211))
    host, port, user, pswd, _path = pump.parse_spec(self.opts, self.spec,
                                                    fallback_port)
    # SSL connections always go through the dedicated SSL port.
    if self.opts.ssl:
        port = couchbaseConstants.SSL_PORT
    return MCSink.connect_mc(host, port, user, pswd)
def connect(self):
    """Open a memcached connection to the sink, passing the sink bucket name."""
    fallback_port = int(getattr(self.opts, "port", 11211))
    host, port, user, pswd, _path = pump.parse_spec(self.opts, self.spec,
                                                    fallback_port)
    # SSL connections always go through the dedicated SSL port.
    port = couchbaseConstants.SSL_PORT if self.opts.ssl else port
    return MCSink.connect_mc(host, port, user, pswd, self.sink_map["name"])
def total_msgs(opts, source_bucket, source_node, source_map):
    """Estimate how many items this source node will produce.

    Reads curr_items and the active resident ratio from the REST stats API.
    For a DGM (data-greater-than-memory) bucket the estimate is inflated,
    since the server transfers both in-memory items and on-disk backfill.

    Returns (0, estimate) on success, or (0, None) when stats are missing.
    """
    source_name = source_node.get("hostname", None)
    if not source_name:
        return 0, None
    spec = source_map["spec"]
    name = source_bucket["name"]
    # Bail out when the bucket has no vbucket map at all.
    vbuckets_num = len(source_bucket["vBucketServerMap"]["vBucketMap"])
    if not vbuckets_num:
        return 0, None
    # NOTE(review): vbucket_list is fetched but never used below — confirm
    # whether per-vbucket filtering was intended here.
    vbucket_list = getattr(opts, "vbucket_list", None)
    stats_vals = {}
    host, port, user, pswd, _ = pump.parse_spec(opts, spec, 8091)
    for stats in ["curr_items", "vb_active_resident_items_ratio"]:
        path = "/pools/default/buckets/%s/stats/%s" % (name, stats)
        # NOTE: the local name `json` shadows the json module within the loop.
        err, json, data = pump.rest_request_json(host, int(port), user, pswd,
                                                 opts.ssl, path,
                                                 reason="total_msgs")
        if err:
            return 0, None
        nodeStats = data.get("nodeStats", None)
        if not nodeStats:
            return 0, None
        vals = nodeStats.get(source_name, None)
        if not vals:
            return 0, None
        # Keep the most recent sample for this node.
        stats_vals[stats] = vals[-1]
    total_msgs = stats_vals["curr_items"]
    resident_ratio = stats_vals["vb_active_resident_items_ratio"]
    if 0 < resident_ratio < 100:
        # for DGM case, server will transfer both in-memory items and backfill all items on disk
        total_msgs += (resident_ratio / 100.0) * stats_vals["curr_items"]
    return 0, int(total_msgs)
def connect(self) -> Tuple[couchbaseConstants.PUMP_ERROR, Optional[cb_bin_client.MemcachedClient]]:
    """Connect to the sink, forwarding bucket name, SSL and collections options."""
    fallback_port = int(getattr(self.opts, "port", 11211))
    host, port, user, pswd, _path = pump.parse_spec(self.opts, self.spec,
                                                    fallback_port)
    # SSL connections always go through the dedicated SSL port.
    port = couchbaseConstants.SSL_PORT if self.opts.ssl else port
    return MCSink.connect_mc(host, port, user, pswd, self.sink_map["name"],
                             self.opts.ssl, collections=self.opts.collection)
def check(opts, spec, source_map):
    """Verify the sink spec is reachable by opening and closing a connection."""
    default_port = int(getattr(opts, "port", 11211))
    host, port, user, pswd, _path = pump.parse_spec(opts, spec, default_port)
    rv, conn = MCSink.connect_mc(host, port, user, pswd)
    if rv != 0:
        return rv, None
    conn.close()
    return 0, None
def check(opts, spec: str, source_map) -> Tuple[couchbaseConstants.PUMP_ERROR, Optional[Dict[str, Any]]]:
    """Check that the sink described by spec is reachable.

    Opens a memcached connection with the parsed credentials (using the SSL
    port when --ssl is given) and closes it immediately.

    Returns (error, None); error is 0 on success.
    """
    host, port, user, pswd, path = pump.parse_spec(opts, spec,
                                                   int(getattr(opts, "port", 11211)))
    if opts.ssl:
        # BUG FIX: this previously assigned to a dead local `ports`, so the
        # SSL port was never actually used for the connection.
        port = couchbaseConstants.SSL_PORT
    rv, conn = MCSink.connect_mc(host, port, user, pswd, None, opts.ssl,
                                 opts.no_ssl_verify, opts.cacert)
    if rv != 0:
        return rv, None
    conn.close()  # type: ignore
    return 0, None
def check(opts, spec, source_map):
    """Check that the sink described by spec is reachable.

    Opens a memcached connection (on the SSL port when --ssl is given) and
    closes it immediately.  Returns (error, None); error is 0 on success.
    """
    host, port, user, pswd, path = pump.parse_spec(opts, spec,
                                                   int(getattr(opts, "port", 11211)))
    if opts.ssl:
        # BUG FIX: this previously assigned to a dead local `ports`, so the
        # SSL port was never actually used for the connection.
        port = couchbaseConstants.SSL_PORT
    rv, conn = MCSink.connect_mc(host, port, user, pswd)
    if rv != 0:
        return rv, None
    conn.close()
    return 0, None
def check(opts, spec: str, source_map) -> Tuple[couchbaseConstants.PUMP_ERROR, Optional[Dict[str, Any]]]:
    """Probe the sink: connect with the parsed credentials, then disconnect."""
    default_port = int(getattr(opts, "port", 11211))
    host, port, user, pswd, _ = pump.parse_spec(opts, spec, default_port)
    # SSL connections always go through the dedicated SSL port.
    port = couchbaseConstants.SSL_PORT if opts.ssl else port
    rv, conn = MCSink.connect_mc(host, port, user, pswd, None, opts.ssl,
                                 opts.no_ssl_verify, opts.cacert)
    if rv != 0:
        return rv, None
    conn.close()  # type: ignore
    return 0, None
def check(opts, spec, source_map):
    """Check that the sink described by spec is reachable.

    Opens a memcached connection (on the SSL port when --ssl is given) and
    closes it immediately.  Returns (error, None); error is 0 on success.
    """
    host, port, user, pswd, path = \
        pump.parse_spec(opts, spec, int(getattr(opts, "port", 11211)))
    if opts.ssl:
        # BUG FIX: this previously assigned to a dead local `ports`, so the
        # SSL port was never actually used for the connection.
        port = couchbaseConstants.SSL_PORT
    rv, conn = MCSink.connect_mc(host, port, user, pswd, None)
    if rv != 0:
        return rv, None
    conn.close()
    return 0, None
def provide_design(opts, source_spec: str, source_bucket: Dict[str, Any], source_map) -> \
        Tuple[couchbaseConstants.PUMP_ERROR, Optional[str]]:
    """Fetch the source bucket's design documents as a JSON string.

    Returns (0, json_rows) on success, (0, None) when the bucket has no
    design docs (or is ephemeral / pre-2.0), or (error, None) on failure.
    """
    # Ephemeral buckets do not have design docs
    if source_bucket['bucketType'] == 'ephemeral':
        return 0, None
    spec_parts = source_map.get('spec_parts')
    if not spec_parts:
        return "error: no design spec_parts", None
    host, port, user, pswd, path = spec_parts
    source_nodes = pump.filter_bucket_nodes(source_bucket, spec_parts)
    if not source_nodes:
        # Fall back to every node in the bucket when filtering matched none.
        source_nodes = source_bucket['nodes']
        if not source_nodes:
            return f'error: no design source node; spec_parts: {spec_parts}', None
    couch_api_base = source_nodes[0].get('couchApiBase')
    if not couch_api_base:
        return 0, None  # No couchApiBase; probably not 2.0.
    err, ddocs_json, ddocs = \
        pump.rest_request_json(host, int(port), user, pswd, opts.ssl,
                               "/pools/default/buckets/%s/ddocs" % (source_bucket['name']),
                               reason="provide_design", verify=opts.no_ssl_verify,
                               cacert=opts.cacert)
    if err and "response: 404" in err:  # A 404/not-found likely means 2.0-DP4.
        ddocs_json = None
        ddocs_url = f'{couch_api_base}/_all_docs'
        ddocs_qry = "?startkey=\"_design/\"&endkey=\"_design0\"&include_docs=true"
        host, port, user, pswd, path = \
            pump.parse_spec(opts, ddocs_url, 8092)
        # Not using user/pwd as 2.0-DP4 CAPI did not support auth.
        err, ddocs_json, ddocs = \
            pump.rest_request_json(host, int(port), None, None, opts.ssl,
                                   path + ddocs_qry,
                                   reason="provide_design-2.0DP4",
                                   verify=opts.no_ssl_verify, cacert=opts.cacert)
    if err is not None:
        return err, None
    if ddocs is None:
        return 0, None
    if not ddocs.get('rows', None):
        return 0, None
    else:
        return 0, json.dumps(ddocs.get('rows', []))
def total_msgs(opts, source_bucket: Dict[str, Any], source_node, source_map: Dict[str, Any]) ->\
        Tuple[couchbaseConstants.PUMP_ERROR, Optional[int]]:
    """Estimate how many items this source node will produce.

    Reads curr_items and the active resident ratio via the REST stats API;
    for DGM buckets the estimate is inflated since the server transfers
    both in-memory items and on-disk backfill.

    Returns (0, estimate), or (0, None) when the stats are unavailable.
    """
    source_name = source_node.get("hostname", None)
    if not source_name:
        return 0, None
    spec = source_map['spec']
    name = source_bucket['name']
    # Bail out when the bucket has no vbucket map at all.
    vbuckets_num = len(source_bucket['vBucketServerMap']['vBucketMap'])
    if not vbuckets_num:
        return 0, None
    # NOTE(review): vbucket_list is fetched but never used below — confirm
    # whether per-vbucket filtering was intended here.
    vbucket_list = getattr(opts, "vbucket_list", None)
    stats_vals = {}
    host, port, user, pswd, _ = pump.parse_spec(opts, spec, 8091)
    for stats in ["curr_items", "vb_active_resident_items_ratio"]:
        path = f'/pools/default/buckets/{name}/stats/{stats}'
        # NOTE: the local name `json` shadows the json module within the loop.
        err, json, data = pump.rest_request_json(host, int(port), user, pswd,
                                                 opts.ssl, path,
                                                 reason="total_msgs",
                                                 verify=opts.no_ssl_verify,
                                                 cacert=opts.cacert)
        if err:
            return 0, None
        # NOTE(review): if data is None this stat is silently skipped, and
        # the stats_vals lookups below would raise KeyError — confirm that
        # rest_request_json never returns (no-err, None) here.
        if data is not None:
            nodeStats = data.get("nodeStats", None)
            if not nodeStats:
                return 0, None
            vals = nodeStats.get(source_name, None)
            if not vals:
                return 0, None
            # Keep the most recent sample for this node.
            stats_vals[stats] = vals[-1]
    total_msgs = stats_vals["curr_items"]
    resident_ratio = stats_vals["vb_active_resident_items_ratio"]
    if 0 < resident_ratio < 100:
        # for DGM case, server will transfer both in-memory items and
        # backfill all items on disk
        total_msgs += (resident_ratio / 100.0) * stats_vals["curr_items"]
    return 0, int(total_msgs)
def total_msgs(opts, source_bucket, source_node, source_map):
    """Estimate how many items this source node will produce.

    Reads curr_items and the active resident ratio from the REST stats API;
    for DGM buckets the estimate is inflated since the server transfers both
    in-memory items and on-disk backfill.

    Returns (0, estimate) on success, or (0, None) when stats are missing.
    """
    source_name = source_node.get("hostname", None)
    if not source_name:
        return 0, None
    spec = source_map['spec']
    name = source_bucket['name']
    # Bail out when the bucket has no vbucket map at all.
    vbuckets_num = len(source_bucket['vBucketServerMap']['vBucketMap'])
    if not vbuckets_num:
        return 0, None
    # NOTE(review): vbucket_list is fetched but never used below — confirm
    # whether per-vbucket filtering was intended here.
    vbucket_list = getattr(opts, "vbucket_list", None)
    stats_vals = {}
    host, port, user, pswd, _ = pump.parse_spec(opts, spec, 8091)
    for stats in ["curr_items", "vb_active_resident_items_ratio"]:
        path = "/pools/default/buckets/%s/stats/%s" % (name, stats)
        # NOTE: the local name `json` shadows the json module within the loop.
        err, json, data = pump.rest_request_json(host, int(port), user, pswd,
                                                 opts.ssl, path,
                                                 reason="total_msgs")
        if err:
            return 0, None
        nodeStats = data.get("nodeStats", None)
        if not nodeStats:
            return 0, None
        vals = nodeStats.get(source_name, None)
        if not vals:
            return 0, None
        # Keep the most recent sample for this node.
        stats_vals[stats] = vals[-1]
    total_msgs = stats_vals["curr_items"]
    resident_ratio = stats_vals["vb_active_resident_items_ratio"]
    if 0 < resident_ratio < 100:
        # for DGM case, server will transfer both in-memory items and
        # backfill all items on disk
        total_msgs += (resident_ratio / 100.0) * stats_vals["curr_items"]
    return 0, int(total_msgs)
def provide_design(opts, source_spec, source_bucket, source_map):
    """Fetch the source bucket's design documents as a JSON string.

    Returns (0, json_rows) on success, (0, None) when there are no design
    docs (or pre-2.0 cluster), or (error, None) on failure.
    """
    spec_parts = source_map.get("spec_parts")
    if not spec_parts:
        return "error: no design spec_parts", None
    host, port, user, pswd, path = spec_parts
    source_nodes = pump.filter_bucket_nodes(source_bucket, spec_parts)
    if not source_nodes:
        # Fall back to every node in the bucket when filtering matched none.
        source_nodes = source_bucket["nodes"]
        if not source_nodes:
            return ("error: no design source node; spec_parts: %s" % (spec_parts,),
                    None)
    couch_api_base = source_nodes[0].get("couchApiBase")
    if not couch_api_base:
        return 0, None  # No couchApiBase; probably not 2.0.
    err, ddocs_json, ddocs = pump.rest_request_json(
        host,
        int(port),
        user,
        pswd,
        opts.ssl,
        "/pools/default/buckets/%s/ddocs" % (source_bucket["name"]),
        reason="provide_design",
    )
    if err and "response: 404" in err:  # A 404/not-found likely means 2.0-DP4.
        ddocs_json = None
        ddocs_url = couch_api_base + "/_all_docs"
        ddocs_qry = '?startkey="_design/"&endkey="_design0"&include_docs=true'
        host, port, user, pswd, path = pump.parse_spec(opts, ddocs_url, 8092)
        # Not using user/pwd as 2.0-DP4 CAPI did not support auth.
        err, ddocs_json, ddocs = pump.rest_request_json(
            host, int(port), None, None, opts.ssl, path + ddocs_qry,
            reason="provide_design-2.0DP4"
        )
    if err:
        return err, None
    # NOTE(review): unlike the typed variant of this function, there is no
    # guard for ddocs being None here — confirm rest_request_json cannot
    # return (no-err, None, None).
    if not ddocs.get("rows", None):
        return 0, None
    else:
        return 0, json.dumps(ddocs.get("rows", []))
def total_msgs(opts, source_bucket: Dict[str, Any], source_node, source_map: Dict[str, Any]) ->\
        Tuple[couchbaseConstants.PUMP_ERROR, Optional[int]]:
    """Estimate how many items this source node will produce.

    Reads curr_items and the active resident ratio via the REST stats API;
    for DGM buckets the estimate is inflated since the server transfers
    both in-memory items and on-disk backfill.

    Returns (0, estimate), or (0, None) when the stats are unavailable.
    """
    source_name = source_node.get("hostname", None)
    if not source_name:
        return 0, None
    spec = source_map['spec']
    name = source_bucket['name']
    # Bail out when the bucket has no vbucket map at all.
    vbuckets_num = len(source_bucket['vBucketServerMap']['vBucketMap'])
    if not vbuckets_num:
        return 0, None
    # NOTE(review): vbucket_list is fetched but never used below — confirm
    # whether per-vbucket filtering was intended here.
    vbucket_list = getattr(opts, "vbucket_list", None)
    stats_vals = {}
    host, port, user, pswd, _ = pump.parse_spec(opts, spec, 8091)
    for stats in ["curr_items", "vb_active_resident_items_ratio"]:
        path = f'/pools/default/buckets/{name}/stats/{stats}'
        # NOTE: the local name `json` shadows the json module within the loop.
        err, json, data = pump.rest_request_json(host, int(port), user, pswd,
                                                 opts.ssl, path,
                                                 reason="total_msgs",
                                                 verify=opts.no_ssl_verify,
                                                 cacert=opts.cacert)
        if err:
            return 0, None
        # NOTE(review): if data is None this stat is silently skipped, and
        # the stats_vals lookups below would raise KeyError — confirm that
        # rest_request_json never returns (no-err, None) here.
        if data is not None:
            nodeStats = data.get("nodeStats", None)
            if not nodeStats:
                return 0, None
            vals = nodeStats.get(source_name, None)
            if not vals:
                return 0, None
            # Keep the most recent sample for this node.
            stats_vals[stats] = vals[-1]
    total_msgs = stats_vals["curr_items"]
    resident_ratio = stats_vals["vb_active_resident_items_ratio"]
    if 0 < resident_ratio < 100:
        # for DGM case, server will transfer both in-memory items and
        # backfill all items on disk
        total_msgs += (resident_ratio/100.0) * stats_vals["curr_items"]
    return 0, int(total_msgs)
def provide_design(opts, source_spec, source_bucket, source_map):
    """Fetch the source bucket's design documents as a JSON string.

    Returns (0, json_rows) on success, (0, None) when there are no design
    docs (or pre-2.0 cluster), or (error, None) on failure.
    """
    spec_parts = source_map.get('spec_parts')
    if not spec_parts:
        return "error: no design spec_parts", None
    host, port, user, pswd, path = spec_parts
    source_nodes = pump.filter_bucket_nodes(source_bucket, spec_parts)
    if not source_nodes:
        # Fall back to every node in the bucket when filtering matched none.
        source_nodes = source_bucket['nodes']
        if not source_nodes:
            return ("error: no design source node; spec_parts: %s" %
                    (spec_parts, ), None)
    couch_api_base = source_nodes[0].get('couchApiBase')
    if not couch_api_base:
        return 0, None  # No couchApiBase; probably not 2.0.
    err, ddocs_json, ddocs = \
        pump.rest_request_json(host, int(port), user, pswd, opts.ssl,
                               "/pools/default/buckets/%s/ddocs" %
                               (source_bucket['name']),
                               reason="provide_design")
    if err and "response: 404" in err:  # A 404/not-found likely means 2.0-DP4.
        ddocs_json = None
        ddocs_url = couch_api_base + "/_all_docs"
        ddocs_qry = "?startkey=\"_design/\"&endkey=\"_design0\"&include_docs=true"
        host, port, user, pswd, path = \
            pump.parse_spec(opts, ddocs_url, 8092)
        # Not using user/pwd as 2.0-DP4 CAPI did not support auth.
        err, ddocs_json, ddocs = \
            pump.rest_request_json(host, int(port), None, None, opts.ssl,
                                   path + ddocs_qry,
                                   reason="provide_design-2.0DP4")
    if err:
        return err, None
    # NOTE(review): no guard for ddocs being None here — confirm that
    # rest_request_json cannot return (no-err, None, None).
    if not ddocs.get('rows', None):
        return 0, None
    else:
        return 0, json.dumps(ddocs.get('rows', []))
def total_msgs(opts, source_bucket, source_node, source_map):
    """Return (0, curr_items) for the source node via the REST stats API.

    Returns (0, None) when the hostname or any stat lookup is unavailable.
    """
    source_name = source_node.get("hostname", None)
    if not source_name:
        return 0, None
    spec = source_map["spec"]
    name = source_bucket["name"]
    path = "/pools/default/buckets/%s/stats/curr_items" % (name)
    host, port, user, pswd, _ = pump.parse_spec(opts, spec, 8091)
    # NOTE: the local name `json` shadows the json module after this call.
    err, json, data = pump.rest_request_json(host, int(port), user, pswd,
                                             path, reason="total_msgs")
    if err:
        return 0, None
    nodeStats = data.get("nodeStats", None)
    if not nodeStats:
        return 0, None
    curr_items = nodeStats.get(source_name, None)
    if not curr_items:
        return 0, None
    # The stats API returns a time series; the last sample is the newest.
    return 0, curr_items[-1]
def provide_design(opts, source_spec, source_bucket, source_map):
    """Fetch the source bucket's design documents as a JSON string.

    Returns (0, json_rows) on success, (0, None) for pre-2.0 clusters, or
    (error, None) on failure.
    """
    spec_parts = source_map.get('spec_parts')
    if not spec_parts:
        return "error: no design spec_parts", None
    host, port, user, pswd, path = spec_parts
    source_nodes = pump.filter_bucket_nodes(source_bucket, spec_parts)
    if not source_nodes:
        # Only a loopback spec may fall back to all bucket nodes; a remote
        # spec that matched no node is an error.
        if spec_parts[0] not in ['localhost', '127.0.0.1']:
            return ("error: no design source node; spec_parts: %s" %
                    (spec_parts,), None)
        else:
            source_nodes = source_bucket['nodes']
    couch_api_base = source_nodes[0].get('couchApiBase')
    if not couch_api_base:
        return 0, None  # No couchApiBase; probably not 2.0.
    err, ddocs_json, ddocs = \
        pump.rest_request_json(host, int(port), user, pswd,
                               "/pools/default/buckets/%s/ddocs" %
                               (source_bucket['name']),
                               reason="provide_design")
    if err and "response: 404" in err:  # A 404/not-found likely means 2.0-DP4.
        ddocs_json = None
        ddocs_url = couch_api_base + "/_all_docs"
        ddocs_qry = "?startkey=\"_design/\"&endkey=\"_design0\"&include_docs=true"
        host, port, user, pswd, path = \
            pump.parse_spec(opts, ddocs_url, 8092)
        # Not using user/pwd as 2.0-DP4 CAPI did not support auth.
        err, ddocs_json, ddocs = \
            pump.rest_request_json(host, int(port), None, None,
                                   path + ddocs_qry,
                                   reason="provide_design-2.0DP4")
    if err:
        return err, None
    # NOTE(review): no guard for ddocs being None here — confirm that
    # rest_request_json cannot return (no-err, None, None).
    return 0, json.dumps(ddocs.get('rows', []))
def connect(self):
    """Open a memcached connection to the host described by self.spec."""
    fallback_port = int(getattr(self.opts, "port", 11211))
    host, port, user, pswd, _path = pump.parse_spec(self.opts, self.spec,
                                                    fallback_port)
    return MCSink.connect_mc(host, port, user, pswd)
def consume_design(opts, sink_spec: str, sink_map, source_bucket, source_map,
                   source_design: Union[str, bytes]) -> couchbaseConstants.PUMP_ERROR:
    """Restore design documents / index statements to the sink cluster.

    source_design is a JSON blob: a dict (a single design doc, or a bag of
    N1QL statements) or a list of design-doc rows.  Returns 0 on success or
    an error string.
    """
    if not source_design:
        return 0
    try:
        sd = json.loads(source_design)
    except ValueError as e:
        return f'error: could not parse source design; exception: {e!s}'
    if not sd:
        return 0
    if (not sink_map['buckets'] or len(sink_map['buckets']) != 1
            or not sink_map['buckets'][0] or not sink_map['buckets'][0]['name']):
        return "error: design sink incorrect sink_map bucket"
    spec_parts = pump.parse_spec(opts, sink_spec, 8091)
    if not spec_parts:
        return "error: design sink no spec_parts: " + sink_spec
    sink_bucket = sink_map['buckets'][0]
    sink_nodes = pump.filter_bucket_nodes(sink_bucket, spec_parts) or \
        sink_bucket['nodes']
    if not sink_nodes:
        return "error: design sink nodes missing"
    couch_api_base = sink_nodes[0].get('couchApiBase')
    if not couch_api_base:
        return f'error: cannot restore bucket design on a couchbase cluster that does not support couch API;' \
               f' the couchbase cluster may be an older, pre-2.0 version; please check your cluster URL: {sink_spec}'
    host, port, user, pswd, path = \
        pump.parse_spec(opts, couch_api_base, 8092)
    if user is None:
        user = spec_parts[2]  # Default to the main REST user/pswd.
        pswd = spec_parts[3]
    if opts.username_dest is not None and opts.password_dest is not None:
        user = opts.username_dest
        # BUG FIX: this previously re-assigned `user = opts.password_dest`,
        # clobbering the username and never setting the password.
        pswd = opts.password_dest
    if type(sd) is dict:
        id = sd.get('_id', None)
        if id:
            # A single design doc: PUT it straight to the CAPI endpoint.
            str_source = _to_string(source_design)
            err, conn, response = \
                pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                  f'{path}/{id}', method='PUT',
                                  body=str_source, reason="consume_design",
                                  verify=opts.no_ssl_verify,
                                  ca_cert=opts.cacert)
            if conn:
                conn.close()
            if err:
                return f'error: could not restore design doc id: {id}; response: {response}; err: {err}'
        else:
            # No _id: treat the dict as a bag of N1QL index statements.
            stmts = sd.get('statements', [])
            hostname = f'http://{spec_parts[0]}:{spec_parts[1]!s}'
            cm = ClusterManager(hostname, user, pswd, opts.ssl)
            try:
                for stmt in stmts:
                    result, errors = cm.n1ql_query(stmt['statement'],
                                                   stmt.get('args', None))
                    if errors:
                        logging.error(f'N1QL query {stmt["statement"]} failed due to {errors}')
                    if result and 'errors' in result:
                        for error in result['errors']:
                            logging.error(f'N1QL query {stmt["statement"]} failed due to error `{error["msg"]}`')
            except ServiceNotAvailableException as e:
                logging.error("Failed to restore indexes, cluster does not contain a query node")
    elif type(sd) is list:
        for row in sd:
            logging.debug(f'design_doc row: {row!s}')
            doc = row.get('doc', None)
            if not doc:
                stmt = row.get('statement', None)
                if not stmt:
                    return f'error: missing design doc or index statement in row: {row}'
                else:
                    # publish index
                    return 0
            if 'json' in doc and 'meta' in doc:
                js = doc['json']
                id = doc['meta'].get('id', None)
                if not id:
                    return f'error: missing id for design doc: {row}'
            else:
                # Handle design-doc from 2.0DP4.
                js = doc
                if '_rev' in js:
                    del js['_rev']
                id = row.get('id', None)
                if not id:
                    return f'error: missing id for row: {row}'
            js_doc = json.dumps(js)
            # Escape the doc id for the URL, keeping the _design/ prefix intact.
            if id.startswith(CBSink.DDOC_HEAD):
                id = CBSink.DDOC_HEAD + urllib.parse.quote(id[len(CBSink.DDOC_HEAD):], '')
            else:
                id = urllib.parse.quote(id, '')
            logging.debug(f'design_doc: {js_doc}')
            logging.debug(f'design_doc id: {id} at: {path}/{id}')
            try:
                err, conn, response = \
                    pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                      f'{path}/{id}', method='PUT',
                                      body=js_doc, reason="consume_design",
                                      verify=opts.no_ssl_verify,
                                      ca_cert=opts.cacert)
                if conn:
                    conn.close()
                if err:
                    return f'error: could not restore design doc id: {id}; response: {response}; err: {err}'
            except Exception as e:
                return f'error: design sink exception: {e}; couch_api_base: {couch_api_base}'
            logging.debug(f'design_doc created at: {path}/{id}')
    return 0
def consume_design(
        opts, sink_spec: str, sink_map, source_bucket, source_map,
        source_design: Union[str, bytes]) -> couchbaseConstants.PUMP_ERROR:
    """Restore design documents / index statements to the sink cluster.

    source_design is a JSON blob: a dict (a single design doc, or a bag of
    N1QL statements) or a list of design-doc rows.  Returns 0 on success or
    an error string.
    """
    if not source_design:
        return 0
    try:
        sd = json.loads(source_design)
    except ValueError as e:
        return f'error: could not parse source design; exception: {e!s}'
    if not sd:
        return 0
    if (not sink_map['buckets'] or len(sink_map['buckets']) != 1
            or not sink_map['buckets'][0] or not sink_map['buckets'][0]['name']):
        return "error: design sink incorrect sink_map bucket"
    spec_parts = pump.parse_spec(opts, sink_spec, 8091)
    if not spec_parts:
        return "error: design sink no spec_parts: " + sink_spec
    sink_bucket = sink_map['buckets'][0]
    sink_nodes = pump.filter_bucket_nodes(sink_bucket, spec_parts) or \
        sink_bucket['nodes']
    if not sink_nodes:
        return "error: design sink nodes missing"
    couch_api_base = sink_nodes[0].get('couchApiBase')
    if not couch_api_base:
        return f'error: cannot restore bucket design on a couchbase cluster that does not support couch API;' \
               f' the couchbase cluster may be an older, pre-2.0 version; please check your cluster URL: {sink_spec}'
    host, port, user, pswd, path = \
        pump.parse_spec(opts, couch_api_base, 8092)
    if user is None:
        user = spec_parts[2]  # Default to the main REST user/pswd.
        pswd = spec_parts[3]
    if opts.username_dest is not None and opts.password_dest is not None:
        user = opts.username_dest
        # BUG FIX: this previously re-assigned `user = opts.password_dest`,
        # clobbering the username and never setting the password.
        pswd = opts.password_dest
    if type(sd) is dict:
        id = sd.get('_id', None)
        if id:
            # A single design doc: PUT it straight to the CAPI endpoint.
            str_source = _to_string(source_design)
            err, conn, response = \
                pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                  f'{path}/{id}', method='PUT',
                                  body=str_source, reason="consume_design",
                                  verify=opts.no_ssl_verify,
                                  ca_cert=opts.cacert)
            if conn:
                conn.close()
            if err:
                return f'error: could not restore design doc id: {id}; response: {response}; err: {err}'
        else:
            # No _id: treat the dict as a bag of N1QL index statements.
            stmts = sd.get('statements', [])
            hostname = f'http://{spec_parts[0]}:{spec_parts[1]!s}'
            cm = ClusterManager(hostname, user, pswd, opts.ssl)
            try:
                for stmt in stmts:
                    result, errors = cm.n1ql_query(stmt['statement'],
                                                   stmt.get('args', None))
                    if errors:
                        logging.error(
                            f'N1QL query {stmt["statement"]} failed due to {errors}'
                        )
                    if result and 'errors' in result:
                        for error in result['errors']:
                            logging.error(
                                f'N1QL query {stmt["statement"]} failed due to error `{error["msg"]}`'
                            )
            except ServiceNotAvailableException as e:
                logging.error(
                    "Failed to restore indexes, cluster does not contain a query node"
                )
    elif type(sd) is list:
        for row in sd:
            logging.debug(f'design_doc row: {row!s}')
            doc = row.get('doc', None)
            if not doc:
                stmt = row.get('statement', None)
                if not stmt:
                    return f'error: missing design doc or index statement in row: {row}'
                # publish index
                return 0
            if 'json' in doc and 'meta' in doc:
                js = doc['json']
                id = doc['meta'].get('id', None)
                if not id:
                    return f'error: missing id for design doc: {row}'
            else:
                # Handle design-doc from 2.0DP4.
                js = doc
                if '_rev' in js:
                    del js['_rev']
                id = row.get('id', None)
                if not id:
                    return f'error: missing id for row: {row}'
            js_doc = json.dumps(js)
            # Escape the doc id for the URL, keeping the _design/ prefix intact.
            if id.startswith(CBSink.DDOC_HEAD):
                id = CBSink.DDOC_HEAD + urllib.parse.quote(
                    id[len(CBSink.DDOC_HEAD):], '')
            else:
                id = urllib.parse.quote(id, '')
            logging.debug(f'design_doc: {js_doc}')
            logging.debug(f'design_doc id: {id} at: {path}/{id}')
            try:
                err, conn, response = \
                    pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                      f'{path}/{id}', method='PUT',
                                      body=js_doc, reason="consume_design",
                                      verify=opts.no_ssl_verify,
                                      ca_cert=opts.cacert)
                if conn:
                    conn.close()
                if err:
                    return f'error: could not restore design doc id: {id}; response: {response}; err: {err}'
            except Exception as e:
                return f'error: design sink exception: {e}; couch_api_base: {couch_api_base}'
            logging.debug(f'design_doc created at: {path}/{id}')
    return 0
def consume_design(opts, sink_spec, sink_map, source_bucket, source_map,
                   source_design):
    """Restore design documents / index statements to the sink cluster.

    source_design is a JSON blob: a dict (a single design doc, or a bag of
    N1QL statements) or a list of design-doc rows.  Returns 0 on success or
    an error string.
    """
    if not source_design:
        return 0
    try:
        sd = json.loads(source_design)
    except ValueError as e:
        return "error: could not parse source design; exception: %s" % (e)
    if not sd:
        return 0
    if (not sink_map['buckets'] or len(sink_map['buckets']) != 1 or
            not sink_map['buckets'][0] or not sink_map['buckets'][0]['name']):
        return "error: design sink incorrect sink_map bucket"
    spec_parts = pump.parse_spec(opts, sink_spec, 8091)
    if not spec_parts:
        return "error: design sink no spec_parts: " + sink_spec
    sink_bucket = sink_map['buckets'][0]
    sink_nodes = pump.filter_bucket_nodes(sink_bucket, spec_parts) or \
        sink_bucket['nodes']
    if not sink_nodes:
        return "error: design sink nodes missing"
    couch_api_base = sink_nodes[0].get('couchApiBase')
    if not couch_api_base:
        return "error: cannot restore bucket design" \
               " on a couchbase cluster that does not support couch API;" \
               " the couchbase cluster may be an older, pre-2.0 version;" \
               " please check your cluster URL: " + sink_spec
    host, port, user, pswd, path = \
        pump.parse_spec(opts, couch_api_base, 8092)
    if user is None:
        user = spec_parts[2]  # Default to the main REST user/pswd.
        pswd = spec_parts[3]
    if opts.username_dest is not None and opts.password_dest is not None:
        user = opts.username_dest
        # BUG FIX: this previously re-assigned `user = opts.password_dest`,
        # clobbering the username and never setting the password.
        pswd = opts.password_dest
    if type(sd) is dict:
        id = sd.get('_id', None)
        if id:
            # A single design doc: PUT it straight to the CAPI endpoint.
            err, conn, response = \
                pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                  path + "/" + id, method='PUT',
                                  body=source_design,
                                  reason="consume_design",
                                  verify=opts.no_ssl_verify,
                                  ca_cert=opts.cacert)
            if conn:
                conn.close()
            if err:
                return ("error: could not restore design doc id: %s" +
                        "; response: %s; err: %s") % (id, response, err)
        else:
            # No _id: treat the dict as a bag of N1QL index statements.
            stmts = sd.get('statements', [])
            hostname = 'http://' + spec_parts[0] + ':' + str(spec_parts[1])
            cm = ClusterManager(hostname, user, pswd, opts.ssl)
            try:
                for stmt in stmts:
                    result, errors = cm.n1ql_query(stmt['statement'],
                                                   stmt.get('args', None))
                    if errors:
                        logging.error('N1QL query %s failed due to %s'
                                      % (stmt['statement'], errors))
                    if result and 'errors' in result:
                        for error in result['errors']:
                            logging.error('N1QL query %s failed due to error `%s`'
                                          % (stmt['statement'], error['msg']))
            except ServiceNotAvailableException as e:
                logging.error("Failed to restore indexes, cluster does not contain a" +
                              " query node")
    elif type(sd) is list:
        for row in sd:
            logging.debug("design_doc row: " + str(row))
            doc = row.get('doc', None)
            if not doc:
                stmt = row.get('statement', None)
                if not stmt:
                    return "error: missing design doc or index statement in row: %s" % (row)
                else:
                    #publish index
                    return 0
            if 'json' in doc and 'meta' in doc:
                js = doc['json']
                id = doc['meta'].get('id', None)
                if not id:
                    return "error: missing id for design doc: %s" % (row)
            else:
                # Handle design-doc from 2.0DP4.
                js = doc
                if '_rev' in js:
                    del js['_rev']
                id = row.get('id', None)
                if not id:
                    return "error: missing id for row: %s" % (row)
            js_doc = json.dumps(js)
            # Escape the doc id for the URL, keeping the _design/ prefix intact.
            if id.startswith(CBSink.DDOC_HEAD):
                id = CBSink.DDOC_HEAD + urllib.parse.quote(id[len(CBSink.DDOC_HEAD):], '')
            else:
                id = urllib.parse.quote(id, '')
            logging.debug("design_doc: " + js_doc)
            logging.debug("design_doc id: " + id + " at: " + path + "/" + id)
            try:
                err, conn, response = \
                    pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                      path + "/" + id, method='PUT',
                                      body=js_doc, reason="consume_design",
                                      verify=opts.no_ssl_verify,
                                      ca_cert=opts.cacert)
                if conn:
                    conn.close()
                if err:
                    return ("error: could not restore design doc id: %s" +
                            "; response: %s; err: %s") % (id, response, err)
            except Exception as e:
                return ("error: design sink exception: %s" +
                        "; couch_api_base: %s") % (e, couch_api_base)
            logging.debug("design_doc created at: " + path + "/" + id)
    return 0
def consume_design(opts, sink_spec, sink_map, source_bucket, source_map,
                   source_design):
    """Restore index metadata to the sink cluster's index service.

    NOTE(review): the original function header was truncated in this file;
    the signature is reconstructed from the sibling consume_design variants
    — confirm against the original source.

    POSTs the parsed design blob to /restoreIndexMetadata on the index node.
    Returns 0 on success (or when no index server is found) or an error
    string.
    """
    if not source_design:
        return 0
    try:
        sd = json.loads(source_design)
        if not sd:
            return 0
    # BUG FIX: `except ValueError, e` is Python-2-only syntax.
    except ValueError as e:
        return "error: could not parse source design; exception: %s" % (e)
    err, index_server = pump.filter_server(opts, sink_spec, 'index')
    if err or not index_server:
        # Best-effort: a cluster without an index service is not an error.
        logging.error("could not find index server")
        return 0
    spec_parts = pump.parse_spec(opts, sink_spec, 8091)
    if not spec_parts:
        return "error: design sink no spec_parts: " + sink_spec
    host, port, user, pswd, path = spec_parts
    # Target the index server's host/port rather than the REST spec's.
    host, port = pump.hostport(index_server)
    sink_bucket = sink_map['buckets'][0]
    url = "/restoreIndexMetadata?bucket=%s" % sink_bucket['name']
    err, conn, response = \
        pump.rest_request(host, couchbaseConstants.INDEX_PORT, user, pswd,
                          opts.ssl, url, method='POST',
                          body=json.dumps(sd),
                          reason='restore index')
    logging.debug(response)
    return 0
class CBSink(pump_mc.MCSink):
    """Smart client sink to couchbase cluster."""

    # NOTE(review): this class uses Python-2-only constructs (`iteritems`,
    # `except X, e`) — it predates the Python 3 port of this tool.

    def __init__(self, opts, spec, source_bucket, source_node,
                 source_map, sink_map, ctl, cur):
        super(CBSink, self).__init__(opts, spec, source_bucket, source_node,
                                     source_map, sink_map, ctl, cur)
        # Whether to rehash keys onto the sink's vbucket map (from --extra).
        self.rehash = opts.extra.get("rehash", 0)

    def scatter_gather(self, mconns, batch):
        """Send a batch's messages to their vbucket owners, then collect replies.

        Returns (rv, retry_batch, no_refresh_needed).
        """
        sink_map_buckets = self.sink_map['buckets']
        if len(sink_map_buckets) != 1:
            return "error: CBSink.run() expected 1 bucket in sink_map", None, None
        vbuckets_num = len(sink_map_buckets[0]['vBucketServerMap']['vBucketMap'])
        vbuckets = batch.group_by_vbucket_id(vbuckets_num, self.rehash)
        # Scatter or send phase.
        for vbucket_id, msgs in vbuckets.iteritems():
            rv, conn = self.find_conn(mconns, vbucket_id)
            if rv != 0:
                return rv, None, None
            rv = self.send_msgs(conn, msgs, self.operation(),
                                vbucket_id=vbucket_id)
            if rv != 0:
                return rv, None, None
        # Yield to let other threads do stuff while server's processing.
        time.sleep(0.01)
        retry_batch = None
        need_refresh = False
        # Gather or recv phase.
        for vbucket_id, msgs in vbuckets.iteritems():
            rv, conn = self.find_conn(mconns, vbucket_id)
            if rv != 0:
                return rv, None, None
            rv, retry, refresh = self.recv_msgs(conn, msgs, vbucket_id=vbucket_id)
            if rv != 0:
                return rv, None, None
            if retry:
                retry_batch = batch
            if refresh:
                need_refresh = True
        if need_refresh:
            # The vbucket map changed under us (e.g. rebalance); re-fetch it.
            self.refresh_sink_map()
        return 0, retry_batch, retry_batch and not need_refresh

    @staticmethod
    def can_handle(opts, spec):
        # This sink serves http:// and couchbase:// destination specs.
        return (spec.startswith("http://") or
                spec.startswith("couchbase://"))

    @staticmethod
    def check_source(opts, source_class, source_spec, sink_class, sink_spec):
        # Cluster-to-cluster transfers need no extra source validation.
        if (source_spec.startswith("http://") or
                source_spec.startswith("couchbase://")):
            return None
        return pump.Sink.check_source(opts, source_class, source_spec,
                                      sink_class, sink_spec)

    @staticmethod
    def check(opts, spec, source_map):
        """Validate the destination cluster and narrow sink_map to one bucket."""
        rv, sink_map = pump.rest_couchbase(opts, spec)
        if rv != 0:
            return rv, None
        rv, source_bucket_name = pump.find_source_bucket_name(opts, source_map)
        if rv != 0:
            return rv, None
        rv, sink_bucket_name = pump.find_sink_bucket_name(opts, source_bucket_name)
        if rv != 0:
            return rv, None
        # Adjust sink_map['buckets'] to have only our sink_bucket.
        sink_buckets = [bucket for bucket in sink_map['buckets']
                        if bucket['name'] == sink_bucket_name]
        if not sink_buckets:
            return "error: missing bucket-destination: " + sink_bucket_name + \
                " at destination: " + spec + \
                "; perhaps your username/password is missing or incorrect", None
        if len(sink_buckets) != 1:
            return "error: multiple buckets with name: " + sink_bucket_name + \
                " at destination: " + spec, None
        sink_map['buckets'] = sink_buckets
        return 0, sink_map

    def refresh_sink_map(self):
        """Grab a new vbucket-server-map."""
        logging.warn("refreshing sink map: %s" % (self.spec))
        rv, new_sink_map = CBSink.check(self.opts, self.spec, self.source_map)
        if rv == 0:
            self.sink_map = new_sink_map
        return rv

    @staticmethod
    def consume_design(opts, sink_spec, sink_map,
                       source_bucket, source_map, source_design):
        """Restore design documents from source_design to the sink cluster.

        source_design is a JSON list of design-doc rows.  Returns 0 on
        success or an error string.

        NOTE(review): there is no explicit `return 0` at the end of the
        success path (the function falls off and returns None) — confirm
        callers treat None the same as 0.
        """
        if not source_design:
            return 0
        try:
            sd = json.loads(source_design)
        except ValueError, e:
            return "error: could not parse source design; exception: %s" % (e)
        if not sd:
            return 0
        if (not sink_map['buckets'] or len(sink_map['buckets']) != 1 or
                not sink_map['buckets'][0] or
                not sink_map['buckets'][0]['name']):
            return "error: design sink incorrect sink_map bucket"
        spec_parts = pump.parse_spec(opts, sink_spec, 8091)
        if not spec_parts:
            return "error: design sink no spec_parts: " + sink_spec
        sink_bucket = sink_map['buckets'][0]
        sink_nodes = pump.filter_bucket_nodes(sink_bucket, spec_parts) or \
            sink_bucket['nodes']
        if not sink_nodes:
            return "error: design sink nodes missing"
        couch_api_base = sink_nodes[0].get('couchApiBase')
        if not couch_api_base:
            return "error: cannot restore bucket design" \
                " on a couchbase cluster that does not support couch API;" \
                " the couchbase cluster may be an older, pre-2.0 version;" \
                " please check your cluster URL: " + sink_spec
        host, port, user, pswd, path = \
            pump.parse_spec(opts, couch_api_base, 8092)
        if user is None:
            user = spec_parts[2]  # Default to the main REST user/pswd.
            pswd = spec_parts[3]
        for row in sd:
            logging.debug("design_doc row: " + str(row))
            doc = row.get('doc', None)
            if not doc:
                return "error: missing design doc in row: %s" % (row)
            if 'json' in doc and 'meta' in doc:
                js = doc['json']
                id = doc['meta'].get('id', None)
                if not id:
                    return "error: missing id for design doc: %s" % (row)
            else:
                # Handle design-doc from 2.0DP4.
                js = doc
                if '_rev' in js:
                    del js['_rev']
                id = row.get('id', None)
                if not id:
                    return "error: missing id for row: %s" % (row)
            js_doc = json.dumps(js)
            logging.debug("design_doc: " + js_doc)
            logging.debug("design_doc id: " + id + " at: " + path + "/" + id)
            try:
                err, conn, response = \
                    pump.rest_request(host, int(port), user, pswd,
                                      path + "/" + id,
                                      method='PUT', body=js_doc,
                                      reason="consume_design")
                if conn:
                    conn.close()
                if err:
                    return ("error: could not restore design doc id: %s" +
                            "; response: %s; err: %s") % (id, response, err)
            except Exception, e:
                return ("error: design sink exception: %s" +
                        "; couch_api_base: %s") % (e, couch_api_base)
            logging.debug("design_doc created at: " + path + "/" + id)
def encode_tap_connect_opts(opts, backfill=False, vblist=None):
    """Pack TAP_CONNECT engine-specific options into (ext, value) wire form.

    opts maps TAP flag constants to their payloads; returns the packed flags
    header and the concatenated option values.
    """
    header = 0
    val = []
    for op in sorted(opts.keys()):
        header |= op
        if op in memcacheConstants.TAP_FLAG_TYPES:
            val.append(struct.pack(memcacheConstants.TAP_FLAG_TYPES[op],
                                   opts[op]))
        elif backfill and op == memcacheConstants.TAP_FLAG_CHECKPOINT:
            if opts[op][2] >= 0:
                val.append(struct.pack(">HHQ",
                                       opts[op][0], opts[op][1], opts[op][2]))
        elif vblist and op == memcacheConstants.TAP_FLAG_LIST_VBUCKETS:
            # BUG FIX: the original called `val.apend(` (typo, twice), was
            # missing a closing paren, and packed len() of the raw string
            # "[v1,v2,...]" instead of the parsed vbucket count.
            vblist = vblist[1:-1].split(",")
            val.append(struct.pack(">H", len(vblist)))
            for v in vblist:
                val.append(struct.pack(">H", int(v)))
        else:
            val.append(opts[op])
    return struct.pack(">I", header), ''.join(val)

@staticmethod
def total_msgs(opts, source_bucket, source_node, source_map):
    """Return (0, curr_items) for the source node via the REST stats API,
    or (0, None) when the stat cannot be fetched."""
    source_name = source_node.get("hostname", None)
    if not source_name:
        return 0, None
    spec = source_map['spec']
    name = source_bucket['name']
    path = "/pools/default/buckets/%s/stats/curr_items" % (name)
    host, port, user, pswd, _ = pump.parse_spec(opts, spec, 8091)
    err, json, data = pump.rest_request_json(host, int(port), user, pswd,
                                             path, reason="total_msgs")
    if err:
        return 0, None
    nodeStats = data.get("nodeStats", None)
    if not nodeStats:
        return 0, None
    curr_items = nodeStats.get(source_name, None)
    if not curr_items:
        return 0, None
    return 0, curr_items[-1]

class TapSink(pump_mc.CBSink):
    """Smart client sink using the TAP protocol to a couchbase cluster."""

    def __init__(self, opts, spec, source_bucket, source_node,
                 source_map, sink_map, ctl, cur):
        super(TapSink, self).__init__(opts, spec, source_bucket, source_node,
                                      source_map, sink_map, ctl, cur)
        # Random TAP stream name.  NOTE(review): string.letters is
        # Python-2-only, consistent with the rest of this legacy code.
        self.tap_name = "".join(random.sample(string.letters, 16))

    @staticmethod
    def check_base(opts, spec):
        # Allow destination vbucket state to be anything.
        op = getattr(opts, "destination_operation", None)
        if op not in [None, 'set', 'add', 'get']:
            return ("error: --destination-operation unsupported value: %s" +
                    "; use set, add, get") % (op)
        return pump.EndPoint.check_base(opts, spec)

    def find_conn(self, mconns, vbucket_id):
        """Find/open a connection for vbucket_id and issue TAP_CONNECT on it."""
        rc, conn = super(TapSink, self).find_conn(mconns, vbucket_id)
        if rc != 0:
            return rc, None
        tap_opts = {memcacheConstants.TAP_FLAG_SUPPORT_ACK: ''}
        # TODO(review): `version` is not defined in this scope — confirm
        # where the server version string should come from.
        conn.tap_fix_flag_byteorder = version.split(".") >= ["2", "0", "0"]
        # BUG FIX: was `self.tap_conn.tap_fix_flag_byteorder`; the flag was
        # just set on `conn`, and `self.tap_conn` does not exist here.
        if conn.tap_fix_flag_byteorder:
            tap_opts[memcacheConstants.TAP_FLAG_TAP_FIX_FLAG_BYTEORDER] = ''
        ext, val = TapSink.encode_tap_connect_opts(tap_opts)
        conn._sendCmd(memcacheConstants.CMD_TAP_CONNECT,
                      self.tap_name, val, 0, ext)
        # BUG FIX: was `return rv, conn` with `rv` undefined; rc is 0 here.
        return rc, conn

    def send_msgs(self, conn, msgs, operation, vbucket_id=None):
        """Send msgs via the parent sink, then notify vbucket recovery commit."""
        # BUG FIX: the parent method is `send_msgs`, not `sendMsg`.
        rv = super(TapSink, self).send_msgs(conn, msgs, operation, vbucket_id)
        if rv != 0:
            return rv
        # Send vbucket recovery commit msg.
        # NOTE(review): `self.sink` / `self.sink_bucket` are not set anywhere
        # visible here — confirm these attribute names.
        host, port, user, pwd, path = \
            pump.parse_spec(self.opts, self.sink, 8091)
        # BUG FIX: was a set literal {"vbucket", vbucket_id}; the POST body
        # must be a mapping of form fields.
        params = {"vbucket": vbucket_id}
        err, conn = \
            pump.rest_request(host, int(port), user, pwd,
                              '/pools/default/buckets/%s/commitVbucketRecovery' %
                              self.sink_bucket,
                              method='POST', body=params,
                              reason='notify vbucket recovery done')
        if err:
            logging.error("error: fail to notify that vbucket msg transferring is done")
        return rv
def consume_design(opts, sink_spec, sink_map, source_bucket, source_map,
                   source_design):
    """Restore index metadata to the sink cluster's index service.

    NOTE(review): the original function header was truncated in this file;
    the signature is reconstructed from the sibling consume_design variants
    — confirm against the original source.

    POSTs the parsed design blob to /restoreIndexMetadata on the index node.
    Returns 0 on success (or when no index server exists) or an error string.
    """
    if not source_design:
        return 0
    try:
        sd = json.loads(source_design)
        if not sd:
            return 0
        # Debug dump of the parsed design blob.
        print(json.dumps(sd, indent=2))
    # BUG FIX: `except ValueError, e` is Python-2-only syntax.
    except ValueError as e:
        return "error: could not parse source design; exception: %s" % (e)
    err, index_server = pump.filter_server(opts, sink_spec, 'index')
    if err or not index_server:
        # Best-effort: a cluster without an index service is not an error.
        logging.error("could not find index server")
        return 0
    spec_parts = pump.parse_spec(opts, sink_spec, 8091)
    if not spec_parts:
        return "error: design sink no spec_parts: " + sink_spec
    host, port, user, pswd, path = spec_parts
    # Target the index server's host/port rather than the REST spec's.
    host, port = pump.hostport(index_server)
    sink_bucket = sink_map['buckets'][0]
    url = "/restoreIndexMetadata?bucket=%s" % sink_bucket['name']
    err, conn, response = \
        pump.rest_request(host, couchbaseConstants.INDEX_PORT, user, pswd,
                          opts.ssl, url, method='POST',
                          body=json.dumps(sd),
                          reason='restore index')
    print(response)
    # BUG FIX: the original fell off the end and returned None; callers that
    # compare the result against 0 (like the sibling variant that returns 0
    # here) would treat None as an error.
    return 0