Example 1
def main(min_replica=1,
         max_replica=2,
         aws_region='vir',
         runtime_env='dev',
         dry_run=True,
         debug=True):
    '''
    Main function
    '''
    service_name = 'sample_application'
    namespace = 'alexrhino'
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.info("DEBUG ON")
    else:
        logging.basicConfig(level=logging.INFO)
        logging.info("DEBUG OFF")
    if int(max_replica) >= int(min_replica) and int(min_replica) > 0:
        cm = ClusterManager(runtime_env, aws_region)
        my_service = SampleApplication(service_name, namespace)
        manifest_spec = my_service.load_general_manifest(
            'hpa', runtime_env, aws_region)
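        # Override the replica bounds in the loaded HPA spec before deploying.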
        manifest_spec['spec']['maxReplicas'] = max_replica
        manifest_spec['spec']['minReplicas'] = min_replica
        cm.deploy_manifest(manifest_spec, dry_run)
    else:
        logging.error('Invalid replica range: min=%s, max=%s',
                      min_replica, max_replica)
Example 2
def validate_manifests(aws_region, runtime_env, debug=True):
    set_log_level(debug)
    runtime_envs = config.get_runtime_envs(runtime_env)
    for env in runtime_envs:
        for region in config.get_regions(env, aws_region):
            cm = ClusterManager(runtime_env=env, aws_region=region)
            result = cm.export('manifests')
Example 3
def main(service_name='sample_application',
         aws_region='tyo',
         runtime_env='test',
         dry_run=True,
         namespace='alexrhino',
         debug=True,
         extra_vars='',
         manifest_type='configmap'):
    '''
    Main function
    '''
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.info("DEBUG ON")
    else:
        logging.basicConfig(level=logging.INFO)
        logging.info("DEBUG OFF")
    cluster_controller = ClusterManager(runtime_env, aws_region)
    general_service = MicroService(service_name, namespace)
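    # ConfigMaps may expand to several manifests; other manifest types yield one.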
    if manifest_type == 'configmap':
        manifests = general_service.get_configmap_manifests(
            runtime_env, aws_region)
    else:
        manifest = general_service.load_general_manifest(
            manifest_type, runtime_env, aws_region, extra_vars)
        manifests = [manifest]
    for manifest in manifests:
        cluster_controller.deploy_manifest(manifest, dry_run)
Example 4
    def consume_index(
            opts, sink_spec: str, sink_map, source_bucket, source_map,
            source_design: Union[str, bytes]) -> couchbaseConstants.PUMP_ERROR:
        if not source_design:
            return 0

        try:
            sd = json.loads(source_design)
            if not sd:
                return 0
        except ValueError as e:
            return f'error: could not parse source design; exception: {e!s}'

        try:
            sink_bucket = sink_map['buckets'][0]
            username = opts.username
            password = opts.password
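            # Destination credentials, when provided, override the source ones.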
            if opts.username_dest is not None and opts.password_dest is not None:
                username = opts.username_dest
                password = opts.password_dest
            rest = ClusterManager(sink_spec, username, password, opts.ssl,
                                  opts.no_ssl_verify, opts.cacert, False)
            _, errors = rest.restore_index_metadata(sink_bucket['name'], sd)
            return errors
        except ServiceNotAvailableException as e:
            return "No index service in cluster, skipping restore of indexes"
Example 5
    def consume_index(opts, sink_spec: str, sink_map, source_bucket,
                      source_map, source_design: Union[str, bytes]) -> couchbaseConstants.PUMP_ERROR:
        if not source_design:
            return 0

        try:
            sd = json.loads(source_design)
            if not sd:
                return 0
        except ValueError as e:
            return f'error: could not parse source design; exception: {e!s}'

        try:
            sink_bucket = sink_map['buckets'][0]
            username = opts.username
            password = opts.password
            if opts.username_dest is not None and opts.password_dest is not None:
                username = opts.username_dest
                password = opts.password_dest
            rest = ClusterManager(sink_spec, username, password, opts.ssl, opts.no_ssl_verify,
                                  opts.cacert, False)
            _, errors = rest.restore_index_metadata(sink_bucket['name'], sd)
            return errors
        except ServiceNotAvailableException as e:
            return "No index service in cluster, skipping restore of indexes"
Example 6
def main(image_tag,
         aws_region='vir',
         runtime_env='dev',
         dry_run=True,
         debug=True):
    '''
    Main function
    '''
    service_name = 'sample_application'
    namespace = 'alexrhino'
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.info("DEBUG ON")
    else:
        logging.basicConfig(level=logging.INFO)
        logging.info("DEBUG OFF")
    cm = ClusterManager(runtime_env, aws_region)
    my_service = SampleApplication(service_name, namespace)
    image_url = my_service.get_image_url(image_tag)
    extra_vars = {'image_url': image_url}
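    # Render the deploy manifest with the resolved image URL.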
    manifest_spec = my_service.load_general_manifest('deploy',
                                                     runtime_env,
                                                     aws_region,
                                                     extra_vars=extra_vars)
    if not dry_run:
        my_service.update_s3_image_url(runtime_env, aws_region, image_tag)
    cm.deploy_manifest(manifest_spec, dry_run)
Example 7
def rest_couchbase(
    opts,
    spec: str,
    check_sink_credential: bool = False
) -> Tuple[couchbaseConstants.PUMP_ERROR, Optional[Dict[str, Any]]]:
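    # Normalize couchbase:// specs to plain HTTP for the REST API.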
    spec = spec.replace('couchbase://', 'http://')

    username = opts.username
    password = opts.password
    if check_sink_credential and opts.username_dest is not None and opts.password_dest is not None:
        username = opts.username_dest
        password = opts.password_dest

    rest = ClusterManager(spec, username, password, opts.ssl, False, None,
                          False)

    result, errors = rest.list_buckets(True)
    if errors:
        return errors[0], None

    buckets = []
    for bucket in result:
        if bucket["bucketType"] in ["membase", "couchbase", "ephemeral"]:
            buckets.append(bucket)

    return 0, {
        'spec': spec,
        'buckets': buckets,
        'spec_parts': parse_spec(opts, spec, 8091)
    }
Example 8
 def provide_fts_index(opts, source_spec, source_bucket, source_map):
     try:
         rest = ClusterManager(source_spec, opts.username, opts.password,
                               opts.ssl, False, None, False)
         result, errors = rest.get_fts_index_metadata(source_bucket['name'])
         if errors:
             return errors, None
         return 0, json.dumps(result)
     except ServiceNotAvailableException:
         return 0, None
Example 9
 def provide_fts_index(opts, source_spec, source_bucket, source_map):
     try:
         rest = ClusterManager(source_spec, opts.username, opts.password, opts.ssl, False,
                               None, False)
         result, errors = rest.get_fts_index_metadata(source_bucket['name'])
         if errors:
             return errors, None
         return 0, json.dumps(result)
     except ServiceNotAvailableException:
         return 0, None
Example 10
    def test_topology_progress(self):
        client = ClusterManager('http://localhost:8091', 'u', 'p')
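        # Scripted task states the progress bar will poll, including a stale
        # status and a timed-out poll that should be retried.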
        tasks = TaskGetter([
            {
                'type': 'rebalance',
                'status': 'running',
                'progress': 0.5,
                'recommendedRefreshPeriod': 0.1,
                'detailedProgress': {}
            },
            {
                'type': 'rebalance',
                'status': 'running',
                'progress': 0.6,
                'recommendedRefreshPeriod': 0.1,
                'detailedProgress': {}
            },
            {
                'type': 'rebalance',
                'status': 'running',
                'progress': 0.7,
                'recommendedRefreshPeriod': 0.1,
                'detailedProgress': {}
            },
            {
                'type': 'rebalance',
                'status': 'notRunning',
                'statusIsStale': True
            },
            {
                'type': 'rebalance',
                'status': 'notRunning',
                'masterRequestTimedOut': True
            },
            {
                'type': 'rebalance',
                'status': 'running',
                'progress': 0.9,
                'recommendedRefreshPeriod': 0.1,
                'detailedProgress': {}
            },
            {
                'type': 'rebalance',
                'status': 'notRunning'
            },
        ])

        client.get_tasks = tasks.get_task

        bar = TopologyProgressBar(client, 'rebalance', True)
        err = bar.show()

        self.assertIsNone(err)
        self.assertEqual(tasks.calls, 7)
Example 11
 def provide_fts_alias(opts, source_spec: str, source_bucket: Dict[str, Any], source_map) -> \
         Tuple[couchbaseConstants.PUMP_ERROR, Optional[str]]:
     try:
         rest = ClusterManager(source_spec, opts.username, opts.password, opts.ssl, opts.no_ssl_verify,
                               opts.cacert, False)
         result, errors = rest.get_fts_index_alias()
         if errors:
             return errors, None
         return 0, json.dumps(result)
     except ServiceNotAvailableException:
         return 0, None
Example 12
 def provide_fts_alias(opts, source_spec: str, source_bucket: Dict[str, Any], source_map) -> \
         Tuple[couchbaseConstants.PUMP_ERROR, Optional[str]]:
     try:
         rest = ClusterManager(source_spec, opts.username, opts.password, opts.ssl, opts.no_ssl_verify,
                               opts.cacert, False)
         result, errors = rest.get_fts_index_alias()
         if errors:
             return errors, None
         return 0, json.dumps(result)
     except ServiceNotAvailableException as e:
         return 0, None
Example 13
 def provide_index(opts, source_spec, source_bucket, source_map):
     try:
         rest = ClusterManager(source_spec, opts.username, opts.password,
                               opts.ssl, opts.no_ssl_verify, opts.cacert,
                               False)
         result, errors = rest.get_index_metadata(source_bucket['name'])
         if errors:
             return errors, None
         return 0, json.dumps(result["result"])
     except ServiceNotAvailableException as e:
         return 0, None
Example 14
def main(aws_region='fra',
         runtime_env='test',
         namespace='alexrhino',
         debug=True,
         container='sample_application-service-log',
         tail_lines=1):
    '''
    Main function
    '''
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.info("DEBUG ON")
    else:
        logging.basicConfig(level=logging.INFO)
        logging.info("DEBUG OFF")
    cm = ClusterManager(runtime_env, aws_region)
    cm.get_logs(namespace, container, tail_lines)
Example 15
def rest_couchbase(opts, spec):
    spec = spec.replace('couchbase://', 'http://')
    spec_parts = parse_spec(opts, spec, 8091)
    rest = ClusterManager(spec, opts.username, opts.password, opts.ssl, False,
                          None, False)

    result, errors = rest.list_buckets(True)
    if errors:
        return errors[0], None

    buckets = []
    for bucket in result:
        if bucket["bucketType"] in ["membase", "couchbase", "ephemeral"]:
            buckets.append(bucket)

    return 0, {'spec': spec, 'buckets': buckets, 'spec_parts': spec_parts}
Example 16
def main(pod_name,
         container,
         aws_region='vir',
         runtime_env='dev',
         namespace='alexrhino',
         command='date',
         debug=False):
    '''
    Main function
    '''
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.info("DEBUG ON")
    else:
        logging.basicConfig(level=logging.INFO)
        logging.info("DEBUG OFF")
    cm = ClusterManager(runtime_env, aws_region)
    cm.exec_command(namespace, pod_name, container, command)
Example 17
def main(aws_region='vir', runtime_env='dev', debug=False):
    '''
    Main function
    '''
    service_name = 'sample_application'
    namespace = 'alexrhino'
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.info("DEBUG ON")
    else:
        logging.basicConfig(level=logging.INFO)
        logging.info("DEBUG OFF")
    cm = ClusterManager(runtime_env, aws_region)
    sample_application_service = SampleApplication(service_name, namespace)
    manifest_spec = sample_application_service.load_general_manifest(
        'deploy', runtime_env, aws_region)
    cm.list_images(manifest_spec)
Example 18
def main(aws_region='vir',
         runtime_env='dev',
         debug=False):
    '''
    Main function
    '''
    service_name = 'sample_application'
    if runtime_env == 'dev':
        namespace = 'alexrhino'
    else:
        namespace = 'alexrhino-' + runtime_env
    if debug:
        logging.basicConfig(level=logging.DEBUG)
        logging.info("DEBUG ON")
    else:
        logging.basicConfig(level=logging.INFO)
        logging.info("DEBUG OFF")
    cm = ClusterManager(runtime_env, aws_region)
    cm.list_pods(namespace)
Example 19
    def consume_fts_index(opts, sink_spec, sink_map, source_bucket, source_map, source_design):
        if not source_design:
            return 0

        try:
            index_defs = json.loads(source_design)
            if not index_defs:
                return 0
        except ValueError as e:
            return "error: could not parse fts index definitions; exception: %s" % (e)

        try:
            username = opts.username
            password = opts.password
            if opts.username_dest is not None and opts.password_dest is not None:
                username = opts.username_dest
                password = opts.password_dest
            rest = ClusterManager(sink_spec, username, password, opts.ssl, opts.no_ssl_verify,
                                  opts.cacert, False)
            _, errors = rest.restore_fts_index_metadata(index_defs)
            return errors
        except ServiceNotAvailableException as e:
            return "No fts service in cluster, skipping restore of indexes"
Example 20
    def run_contest_remotely(self, hosts, resume_folder=None, first=True):
        self.prepare_dirs()

        if resume_folder is not None:
            contest_folder = os.path.split(self.tmp_dir)[1]
            resume_folder = os.path.join(resume_folder, contest_folder)
            shutil.rmtree(self.tmp_logs_dir)
            shutil.copytree(os.path.join(resume_folder, "logs-run"),
                            self.tmp_logs_dir)
            shutil.rmtree(self.tmp_replays_dir)
            shutil.copytree(os.path.join(resume_folder, "replays-run"),
                            self.tmp_replays_dir)
            jobs = self.resume_contest_jobs()
        else:
            jobs = self.run_contest_jobs()

        #  This is the core package to be transferable to each host
        core_req_file = TransferableFile(
            local_path=os.path.join(TMP_DIR, CORE_CONTEST_TEAM_ZIP_FILE),
            remote_path=os.path.join("/tmp", CORE_CONTEST_TEAM_ZIP_FILE),
        )

        # create cluster with hosts and jobs and run it by starting it, and then analyze output results
        # results will contain all outputs from every game played
        if first:
            cm = ClusterManager(hosts, jobs, [core_req_file])
        else:
            # subsequent contests don't need to transfer the files again
            cm = ClusterManager(hosts, jobs, None)
        results = cm.start()

        print(
            "========================= GAMES FINISHED - NEXT ANALYSING OUTPUT OF GAMES ========================= "
        )
        self._analyse_all_outputs(results)
        self._calculate_team_stats()
Example 21
def deploy_all_regions(runtime_env,
                       namespace,
                       service_name,
                       manifest_type,
                       dry_run=True,
                       debug=True):
    '''
    Main function
    '''
    set_log_level(debug)
    all_changed = []
    runtime_envs = config.get_runtime_envs(runtime_env)
    if service_name == 'sample_application':
        my_service = SampleApplication(service_name, namespace)
    else:
        my_service = MicroService(service_name, namespace)
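    # Fan the deployment out across every matching environment and region.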
    for env in runtime_envs:
        for region in config.get_regions(env, 'all'):
            cm = ClusterManager(env, region)
            if config.has_manifest(env, region, namespace, service_name,
                                   manifest_type):
                manifest_spec = my_service.load_general_manifest(
                    manifest_type, env, region)
                cm.deploy_manifest(manifest_spec, dry_run)
Example 22
    def consume_design(
            opts, sink_spec: str, sink_map, source_bucket, source_map,
            source_design: Union[str, bytes]) -> couchbaseConstants.PUMP_ERROR:
        if not source_design:
            return 0
        try:
            sd = json.loads(source_design)
        except ValueError as e:
            return f'error: could not parse source design; exception: {e!s}'
        if not sd:
            return 0

        if (not sink_map['buckets'] or len(sink_map['buckets']) != 1
                or not sink_map['buckets'][0]
                or not sink_map['buckets'][0]['name']):
            return "error: design sink incorrect sink_map bucket"
        spec_parts = pump.parse_spec(opts, sink_spec, 8091)
        if not spec_parts:
            return "error: design sink no spec_parts: " + sink_spec
        sink_bucket = sink_map['buckets'][0]
        sink_nodes = pump.filter_bucket_nodes(sink_bucket, spec_parts) or \
            sink_bucket['nodes']
        if not sink_nodes:
            return "error: design sink nodes missing"
        couch_api_base = sink_nodes[0].get('couchApiBase')
        if not couch_api_base:
            return f'error: cannot restore bucket design on a couchbase cluster that does not support couch API;' \
                f' the couchbase cluster may be an older, pre-2.0 version; please check your cluster URL: {sink_spec}'
        host, port, user, pswd, path = \
            pump.parse_spec(opts, couch_api_base, 8092)
        if user is None:
            user = spec_parts[2]  # Default to the main REST user/pswd.
            pswd = spec_parts[3]

        if opts.username_dest is not None and opts.password_dest is not None:
            user = opts.username_dest
            pswd = opts.password_dest
        if type(sd) is dict:
            id = sd.get('_id', None)
            if id:
                str_source = _to_string(source_design)
                err, conn, response = \
                    pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                      f'{path}/{id}', method='PUT', body=str_source,
                                      reason="consume_design", verify=opts.no_ssl_verify, ca_cert=opts.cacert)
                if conn:
                    conn.close()
                if err:
                    return f'error: could not restore design doc id: {id}; response: {response}; err: {err}'
            else:
                stmts = sd.get('statements', [])
                hostname = f'http://{spec_parts[0]}:{spec_parts[1]!s}'
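                # Replay any saved N1QL statements through the query service.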
                cm = ClusterManager(hostname, user, pswd, opts.ssl)
                try:
                    for stmt in stmts:
                        result, errors = cm.n1ql_query(stmt['statement'],
                                                       stmt.get('args', None))
                        if errors:
                            logging.error(
                                f'N1QL query {stmt["statement"]} failed due to {errors}'
                            )

                        if result and 'errors' in result:
                            for error in result['errors']:
                                logging.error(
                                    f'N1QL query {stmt["statement"]} failed due to error `{error["msg"]}`'
                                )
                except ServiceNotAvailableException as e:
                    logging.error(
                        "Failed to restore indexes, cluster does not contain a query node"
                    )
        elif type(sd) is list:
            for row in sd:
                logging.debug(f'design_doc row: {row!s}')

                doc = row.get('doc', None)
                if not doc:
                    stmt = row.get('statement', None)
                    if not stmt:
                        return f'error: missing design doc or index statement in row: {row}'

                    # publish index
                    return 0

                if 'json' in doc and 'meta' in doc:
                    js = doc['json']
                    id = doc['meta'].get('id', None)
                    if not id:
                        return f'error: missing id for design doc: {row}'
                else:
                    # Handle design-doc from 2.0DP4.
                    js = doc
                    if '_rev' in js:
                        del js['_rev']
                    id = row.get('id', None)
                    if not id:
                        return f'error: missing id for row: {row}'

                js_doc = json.dumps(js)
                if id.startswith(CBSink.DDOC_HEAD):
                    id = CBSink.DDOC_HEAD + urllib.parse.quote(
                        id[len(CBSink.DDOC_HEAD):], '')
                else:
                    id = urllib.parse.quote(id, '')
                logging.debug(f'design_doc: {js_doc}')
                logging.debug(f'design_doc id: {id} at: {path}/{id}')

                try:
                    err, conn, response = \
                        pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                          f'{path}/{id}', method='PUT', body=js_doc,
                                          reason="consume_design", verify=opts.no_ssl_verify, ca_cert=opts.cacert)
                    if conn:
                        conn.close()
                    if err:
                        return f'error: could not restore design doc id: {id}; response: {response}; err: {err}'
                except Exception as e:
                    return f'error: design sink exception: {e}; couch_api_base: {couch_api_base}'

                logging.debug(f'design_doc created at: {path}/{id}')

        return 0
Example 23
    def consume_design(opts, sink_spec: str, sink_map, source_bucket,
                       source_map, source_design: Union[str, bytes]) -> couchbaseConstants.PUMP_ERROR:
        if not source_design:
            return 0
        try:
            sd = json.loads(source_design)
        except ValueError as e:
            return f'error: could not parse source design; exception: {e!s}'
        if not sd:
            return 0

        if (not sink_map['buckets'] or
            len(sink_map['buckets']) != 1 or
            not sink_map['buckets'][0] or
            not sink_map['buckets'][0]['name']):
            return "error: design sink incorrect sink_map bucket"
        spec_parts = pump.parse_spec(opts, sink_spec, 8091)
        if not spec_parts:
            return "error: design sink no spec_parts: " + sink_spec
        sink_bucket = sink_map['buckets'][0]
        sink_nodes = pump.filter_bucket_nodes(sink_bucket, spec_parts) or \
            sink_bucket['nodes']
        if not sink_nodes:
            return "error: design sink nodes missing"
        couch_api_base = sink_nodes[0].get('couchApiBase')
        if not couch_api_base:
            return f'error: cannot restore bucket design on a couchbase cluster that does not support couch API;' \
                f' the couchbase cluster may be an older, pre-2.0 version; please check your cluster URL: {sink_spec}'
        host, port, user, pswd, path = \
            pump.parse_spec(opts, couch_api_base, 8092)
        if user is None:
            user = spec_parts[2]  # Default to the main REST user/pswd.
            pswd = spec_parts[3]

        if opts.username_dest is not None and opts.password_dest is not None:
            user = opts.username_dest
            pswd = opts.password_dest
        if type(sd) is dict:
            id = sd.get('_id', None)
            if id:
                str_source = _to_string(source_design)
                err, conn, response = \
                    pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                      f'{path}/{id}', method='PUT', body=str_source,
                                      reason="consume_design", verify=opts.no_ssl_verify, ca_cert=opts.cacert)
                if conn:
                    conn.close()
                if err:
                    return f'error: could not restore design doc id: {id}; response: {response}; err: {err}'
            else:
                stmts = sd.get('statements', [])
                hostname = f'http://{spec_parts[0]}:{spec_parts[1]!s}'
                cm = ClusterManager(hostname, user, pswd, opts.ssl)
                try:
                    for stmt in stmts:
                        result, errors = cm.n1ql_query(stmt['statement'], stmt.get('args', None))
                        if errors:
                            logging.error(f'N1QL query {stmt["statement"]} failed due to {errors}')

                        if result and 'errors' in result:
                            for error in result['errors']:
                                logging.error(f'N1QL query {stmt["statement"]} failed due to error `{error["msg"]}`')
                except ServiceNotAvailableException as e:
                    logging.error("Failed to restore indexes, cluster does not contain a query node")
        elif type(sd) is list:
            for row in sd:
                logging.debug(f'design_doc row: {row!s}')

                doc = row.get('doc', None)
                if not doc:
                    stmt = row.get('statement', None)
                    if not stmt:
                        return f'error: missing design doc or index statement in row: {row}'
                    else:
                        # publish index
                        return 0

                if 'json' in doc and 'meta' in doc:
                    js = doc['json']
                    id = doc['meta'].get('id', None)
                    if not id:
                        return f'error: missing id for design doc: {row}'
                else:
                    # Handle design-doc from 2.0DP4.
                    js = doc
                    if '_rev' in js:
                        del js['_rev']
                    id = row.get('id', None)
                    if not id:
                        return f'error: missing id for row: {row}'

                js_doc = json.dumps(js)
                if id.startswith(CBSink.DDOC_HEAD):
                    id = CBSink.DDOC_HEAD + urllib.parse.quote(id[len(CBSink.DDOC_HEAD):], '')
                else:
                    id = urllib.parse.quote(id, '')
                logging.debug(f'design_doc: {js_doc}')
                logging.debug(f'design_doc id: {id} at: {path}/{id}')

                try:
                    err, conn, response = \
                        pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                          f'{path}/{id}', method='PUT', body=js_doc,
                                          reason="consume_design", verify=opts.no_ssl_verify, ca_cert=opts.cacert)
                    if conn:
                        conn.close()
                    if err:
                        return f'error: could not restore design doc id: {id}; response: {response}; err: {err}'
                except Exception as e:
                    return f'error: design sink exception: {e}; couch_api_base: {couch_api_base}'

                logging.debug(f'design_doc created at: {path}/{id}')

        return 0
Example 24
        return rv

    @staticmethod
    def consume_fts_index(opts, sink_spec, sink_map, source_bucket, source_map, source_design):
        if not source_design:
            return 0

        try:
            index_defs = json.loads(source_design)
            if not index_defs:
                return 0
        except ValueError as e:
            return "error: could not parse fts index definitions; exception: %s" % (e)

        try:
            rest = ClusterManager(sink_spec, opts.username, opts.password, opts.ssl, False,
                                  None, False)
            _, errors = rest.restore_fts_index_metadata(index_defs)
            return errors
        except ServiceNotAvailableException:
            return "No fts service in cluster, skipping restore of indexes"

    @staticmethod
    def consume_index(opts, sink_spec, sink_map, source_bucket, source_map, source_design):
        if not source_design:
            return 0

        try:
            sd = json.loads(source_design)
            if not sd:
                return 0
        except ValueError as e:
Example 25
            jobs.append(
                Job(commands=commands,
                    required_files=required_files,
                    return_files=return_files,
                    id=job_id))
    return jobs


def create_hosts(workers_settings):
    hosts = []
    for worker in workers_settings:
        host = Host(
            no_cpu=worker['no_cpu'],
            hostname=worker['hostname'],
            username=worker['username'],
            password=worker['password'],
            key_filename=worker['private_key_file']
        )
        hosts.append(host)
    return hosts


if __name__ == "__main__":
    args = load_settings()
    settings = load_json(args.config_file)
    jobs = create_jobs(settings)
    workers_settings = load_json(args.workers_file)
    hosts = create_hosts(workers_settings)
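    # Distribute the jobs across the worker hosts and collect their results.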
    cm = ClusterManager(hosts, jobs)
    results = cm.start()
Example 26
    def consume_design(opts, sink_spec, sink_map,
                       source_bucket, source_map, source_design):
        if not source_design:
            return 0
        try:
            sd = json.loads(source_design)
        except ValueError as e:
            return "error: could not parse source design; exception: %s" % (e)
        if not sd:
            return 0

        if (not sink_map['buckets'] or
            len(sink_map['buckets']) != 1 or
            not sink_map['buckets'][0] or
            not sink_map['buckets'][0]['name']):
            return "error: design sink incorrect sink_map bucket"
        spec_parts = pump.parse_spec(opts, sink_spec, 8091)
        if not spec_parts:
            return "error: design sink no spec_parts: " + sink_spec
        sink_bucket = sink_map['buckets'][0]
        sink_nodes = pump.filter_bucket_nodes(sink_bucket, spec_parts) or \
            sink_bucket['nodes']
        if not sink_nodes:
            return "error: design sink nodes missing"
        couch_api_base = sink_nodes[0].get('couchApiBase')
        if not couch_api_base:
            return "error: cannot restore bucket design" \
                " on a couchbase cluster that does not support couch API;" \
                " the couchbase cluster may be an older, pre-2.0 version;" \
                " please check your cluster URL: " + sink_spec
        host, port, user, pswd, path = \
            pump.parse_spec(opts, couch_api_base, 8092)
        if user is None:
            user = spec_parts[2]  # Default to the main REST user/pswd.
            pswd = spec_parts[3]

        if opts.username_dest is not None and opts.password_dest is not None:
            user = opts.username_dest
            pswd = opts.password_dest
        if type(sd) is dict:
            id = sd.get('_id', None)
            if id:
                err, conn, response = \
                    pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                      path + "/" + id, method='PUT', body=source_design,
                                      reason="consume_design", verify=opts.no_ssl_verify, ca_cert=opts.cacert)
                if conn:
                    conn.close()
                if err:
                    return ("error: could not restore design doc id: %s" +
                            "; response: %s; err: %s") % (id, response, err)
            else:
                stmts = sd.get('statements', [])
                hostname = 'http://' + spec_parts[0] + ':' + str(spec_parts[1])
                cm = ClusterManager(hostname, user, pswd, opts.ssl)
                try:
                    for stmt in stmts:
                        result, errors = cm.n1ql_query(stmt['statement'], stmt.get('args', None))
                        if errors:
                            logging.error('N1QL query %s failed due to %s' % (stmt['statement'], errors))

                        if result and 'errors' in result:
                            for error in result['errors']:
                                logging.error('N1QL query %s failed due to error `%s`' % (stmt['statement'], error['msg']))
                except ServiceNotAvailableException as e:
                    logging.error("Failed to restore indexes, cluster does not contain a" +
                                  " query node")
        elif type(sd) is list:
            for row in sd:
                logging.debug("design_doc row: " + str(row))

                doc = row.get('doc', None)
                if not doc:
                    stmt = row.get('statement', None)
                    if not stmt:
                        return "error: missing design doc or index statement in row: %s" % (row)
                    else:
                        # publish index
                        return 0

                if 'json' in doc and 'meta' in doc:
                    js = doc['json']
                    id = doc['meta'].get('id', None)
                    if not id:
                        return "error: missing id for design doc: %s" % (row)
                else:
                    # Handle design-doc from 2.0DP4.
                    js = doc
                    if '_rev' in js:
                        del js['_rev']
                    id = row.get('id', None)
                    if not id:
                        return "error: missing id for row: %s" % (row)

                js_doc = json.dumps(js)
                if id.startswith(CBSink.DDOC_HEAD):
                    id = CBSink.DDOC_HEAD + urllib.parse.quote(id[len(CBSink.DDOC_HEAD):], '')
                else:
                    id = urllib.parse.quote(id, '')
                logging.debug("design_doc: " + js_doc)
                logging.debug("design_doc id: " + id + " at: " + path + "/" + id)

                try:
                    err, conn, response = \
                        pump.rest_request(host, int(port), user, pswd, opts.ssl,
                                          path + "/" + id, method='PUT', body=js_doc,
                                          reason="consume_design", verify=opts.no_ssl_verify, ca_cert=opts.cacert)
                    if conn:
                        conn.close()
                    if err:
                        return ("error: could not restore design doc id: %s" +
                                "; response: %s; err: %s") % (id, response, err)
                except Exception as e:
                    return ("error: design sink exception: %s" +
                            "; couch_api_base: %s") % (e, couch_api_base)

                logging.debug("design_doc created at: " + path + "/" + id)

        return 0
Example 27
    @staticmethod
    def consume_fts_index(opts, sink_spec, sink_map, source_bucket, source_map,
                          source_design):
        if not source_design:
            return 0

        try:
            index_defs = json.loads(source_design)
            if not index_defs:
                return 0
        except ValueError as e:
            return "error: could not parse fts index definitions; exception: %s" % (
                e)

        try:
            rest = ClusterManager(sink_spec, opts.username, opts.password,
                                  opts.ssl, False, None, False)
            _, errors = rest.restore_fts_index_metadata(index_defs)
            return errors
        except ServiceNotAvailableException:
            return "No fts service in cluster, skipping restore of indexes"

    @staticmethod
    def consume_index(opts, sink_spec, sink_map, source_bucket, source_map,
                      source_design):
        if not source_design:
            return 0

        try:
            sd = json.loads(source_design)
            if not sd:
                return 0
Example 28
    def run(self, is_local="false"):
        if is_local == "true":
            self.run_local()
            return

        start_time = datetime.now()
        end_time = datetime.now()
        time_diff = end_time - start_time
        total_seconds = time_diff.total_seconds()

        slacker.do_slack("Dataproc Cluster Status", self.job_name,
                         [{
                             "title": "Cluster Status",
                             "value": "Initiating"
                         }], 0)

        dpm = ClusterManager(self.project_config, self.cluster_config)
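        # Provision the Dataproc cluster and block until it is ready.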
        dpm.create_cluster()
        dpm.wait_for_cluster_creation()

        slacker.do_slack("Dataproc Cluster Status", self.job_name,
                         [{
                             "title": "Cluster Status",
                             "value": "Created"
                         }], 0)

        cm = CodeManager(self.job_config.get('job_name'))
        job_config_gcs = cm.upload_files_to_gcs()

        job_config_gcs.update(self.job_config)
        job_id = dpm.submit_pyspark_job(job_config_gcs)

        slacker.do_slack("Dataproc Job Status", self.job_name,
                         [{
                             "title": "Job Status",
                             "value": "Submitted and Running"
                         }], 0)

        dpm.wait_for_job(job_id)

        slacker.do_slack("Dataproc Job Status", self.job_name,
                         [{
                             "title": "Job Status",
                             "value": "Completed"
                         }], 0)

        dpm.delete_cluster()

        slacker.do_slack("Dataproc Cluster Status", self.job_name,
                         [{
                             "title": "Cluster Status",
                             "value": "Deleting"
                         }], 0)
Example 29
from flask import Flask
from flask_cors import CORS
from flask_pymongo import PyMongo
from color_manager import ColorRequestManager
from cluster_manager import ClusterManager
from dotenv import load_dotenv
import itertools
import os

load_dotenv()
mongo_uri = os.getenv("MONGO")

app = Flask(__name__)
CORS(app)
app.config["MONGO_URI"] = mongo_uri
mongo = PyMongo(app)

db = mongo.cx.music  # get to db via MongoClient
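# Module-level clustering helper shared across all requests.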
clusterer = ClusterManager()
NEXT_ID = 0


def generate_album(doc):
    album = dict()

    global NEXT_ID
    album["id"] = NEXT_ID
    NEXT_ID += 1

    album["name"] = doc["album"]
    album["artist"] = doc["artist"]
    album["year"] = doc["year"]
    album["genres"] = doc["genres"]
    album["spotifyUrl"] = doc["spotify_url"]