def add_nodes(data, cloud, job_id, general_config):
    """
    Add nodes to a cluster and updates the job object

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict

    @param job_id: Job ID
    @type job_id: string

    @param general_config: General config parameters of multistack
    @type general_config: dict
    """
    job_db_item = multistack.main.mongo.db.job.find_one(
        {"_id": objectid.ObjectId(job_id)})
    job_obj = job_db_item['job']
    job_name = job_obj['name']
    fresh_nodes = []
    initiate_cloud(cloud['provider'], job_name, cloud['auth'])
    key_location = '/tmp/' + current_app.cloud.keypair + '.pem'
    for slave in data['slaves']:
        res_slave = current_app.cloud.boot_instances(
            slave['instances'],
            current_app.cloud.keypair,
            [current_app.cloud.slave_security_group],
            slave['flavor'],
            cloud['default_image_id'])
        # Bump the slave count for the matching flavor in the job object.
        # NOTE(review): this bumps by 1 per boot call although
        # slave['instances'] instances were requested — confirm the intended
        # unit of 'instances' against remove_nodes, which mirrors this.
        for flavor_entry in job_obj['slaves']:
            if slave['flavor'] == flavor_entry['flavor']:
                flavor_entry['instances'] += 1
        booted = get_node_objects("slave", res_slave.id)
        job_obj['nodes'] += booted
        fresh_nodes += booted
    job_db_item['job'] = job_obj
    flush_data_to_mongo('job', job_db_item)
    # Chef-configure each new slave over a temporarily associated public IP.
    for node in fresh_nodes:
        slave_public_ip = current_app.cloud.associate_public_ip(node['id'])
        configure_slave(slave_public_ip, key_location, job_name,
                        cloud['user'],
                        general_config['chef_server_hostname'],
                        general_config['chef_server_ip'])
        current_app.cloud.release_public_ip(slave_public_ip)
def add_nodes(data, cloud, job_id, general_config):
    """
    Add nodes to a cluster and updates the job object

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict

    @param job_id: Job ID
    @type job_id: string

    @param general_config: General config parameters of multistack
    @type general_config: dict
    """
    conn = ec2.make_connection(cloud['auth'])
    job_db_item = multistack.main.mongo.db.job.find_one(
        {"_id": objectid.ObjectId(job_id)})
    job_obj = job_db_item['job']
    job_name = job_obj['name']
    new_node_obj_list = list()
    keypair_name, sec_master, sec_slave = ec2.ec2_entities(job_name)
    key_location = '/tmp/' + keypair_name + '.pem'
    for slave in data['slaves']:
        # BUGFIX: slaves were booted into [sec_master]; they belong in the
        # slave security group, matching spawn() (sec_slave was computed but
        # never used before this fix).
        res_slave = ec2.boot_instances(
            conn,
            slave['instances'],
            keypair_name,
            [sec_slave],
            slave['flavor'],
            cloud['default_image_id'])
        # Incrementing the number of slaves in job object
        for count in range(0, len(job_obj['slaves'])):
            if slave['flavor'] == job_obj['slaves'][count]['flavor']:
                job_obj['slaves'][count]['instances'] += 1
        node_obj = get_node_objects(conn, "slave", res_slave.id)
        job_obj['nodes'] += node_obj
        new_node_obj_list += node_obj
    job_db_item['job'] = job_obj
    flush_data_to_mongo('job', job_db_item)
    # Chef-configure each freshly booted slave via its recorded IP address.
    for new_node_obj in new_node_obj_list:
        configure_slave(new_node_obj['ip_address'], key_location, job_name,
                        cloud['user'],
                        general_config['chef_server_hostname'],
                        general_config['chef_server_ip'])
def add_nodes(data, cloud, job_id, general_config):
    """
    Add nodes to a cluster and updates the job object

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict

    @param job_id: Job ID
    @type job_id: string

    @param general_config: General config parameters of multistack
    @type general_config: dict
    """
    # Load the persisted job document by its mongo ObjectId.
    job_db_item = multistack.main.mongo.db.job.find_one(
        {"_id": objectid.ObjectId(job_id)})
    job_obj = job_db_item['job']
    job_name = job_obj['name']
    new_node_obj_list = list()
    # Bind the provider-specific cloud driver onto current_app.cloud.
    initiate_cloud(cloud['provider'], job_name, cloud['auth'])
    # Private key written by keypair creation; used for SSH during configure.
    key_location = '/tmp/' + current_app.cloud.keypair + '.pem'
    for slave in data['slaves']:
        res_slave = current_app.cloud.boot_instances(
            slave['instances'],
            current_app.cloud.keypair,
            [current_app.cloud.slave_security_group],
            slave['flavor'],
            cloud['default_image_id'])
        # Incrementing the number of slaves in job object
        # NOTE(review): increments by 1 per boot call although
        # slave['instances'] instances were requested — confirm intended.
        for count in range(0, len(job_obj['slaves'])):
            if slave['flavor'] == job_obj['slaves'][count]['flavor']:
                job_obj['slaves'][count]['instances'] += 1
        node_obj = get_node_objects("slave", res_slave.id)
        job_obj['nodes'] += node_obj
        new_node_obj_list += node_obj
    job_db_item['job'] = job_obj
    flush_data_to_mongo('job', job_db_item)
    # Configure each new slave through a temporary public IP, released after.
    for new_node_obj in new_node_obj_list:
        slave_public_ip = current_app.cloud.associate_public_ip(
            new_node_obj['id'])
        configure_slave(slave_public_ip, key_location, job_name,
                        cloud['user'],
                        general_config['chef_server_hostname'],
                        general_config['chef_server_ip'])
        current_app.cloud.release_public_ip(slave_public_ip)
def add_nodes(data, cloud, job_id, general_config):
    """
    Add nodes to a cluster and updates the job object

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict

    @param job_id: Job ID
    @type job_id: string

    @param general_config: General config parameters of multistack
    @type general_config: dict
    """
    conn = ec2.make_connection(cloud['auth'])
    job_db_item = multistack.main.mongo.db.job.find_one(
        {"_id": objectid.ObjectId(job_id)})
    job_obj = job_db_item['job']
    job_name = job_obj['name']
    new_node_obj_list = list()
    keypair_name, sec_master, sec_slave = ec2.ec2_entities(job_name)
    key_location = '/tmp/' + keypair_name + '.pem'
    for slave in data['slaves']:
        # BUGFIX: slaves were booted into [sec_master]; use the slave
        # security group as spawn() does (sec_slave was previously unused).
        res_slave = ec2.boot_instances(conn, slave['instances'], keypair_name,
                                       [sec_slave], slave['flavor'],
                                       cloud['default_image_id'])
        # Incrementing the number of slaves in job object
        for count in range(0, len(job_obj['slaves'])):
            if slave['flavor'] == job_obj['slaves'][count]['flavor']:
                job_obj['slaves'][count]['instances'] += 1
        node_obj = get_node_objects(conn, "slave", res_slave.id)
        job_obj['nodes'] += node_obj
        new_node_obj_list += node_obj
    job_db_item['job'] = job_obj
    flush_data_to_mongo('job', job_db_item)
    # Chef-configure each freshly booted slave via its recorded IP address.
    for new_node_obj in new_node_obj_list:
        configure_slave(new_node_obj['ip_address'], key_location, job_name,
                        cloud['user'],
                        general_config['chef_server_hostname'],
                        general_config['chef_server_ip'])
def set_conf(conf_dir="/etc/multistack"):
    """Parse the multistack config plus every per-cloud '*.conf' file and
    replace the stored configuration document in mongo."""
    conf = parse_multistack_conf(join(conf_dir, 'multistack.conf'))
    cloud_dir = join(conf_dir, 'clouds')
    # Keep only files whose last dot-separated component is 'conf'.
    conf['clouds'] = [
        parse_cloud_conf(join(cloud_dir, name))
        for name in listdir(cloud_dir)
        if name.split('.')[-1] == 'conf'
    ]
    # Drop the previous configuration document before persisting the new one.
    multistack.main.mongo.db.conf.remove()
    flush_data_to_mongo('conf', conf)
def set_conf(conf_dir="/etc/multistack"):
    """Parse the multistack config plus every per-cloud '*.conf' file and
    replace the stored configuration document in mongo.

    @param conf_dir: Directory holding multistack.conf and a clouds/ subdir.
    @type conf_dir: string
    """
    conf = parse_multistack_conf(join(conf_dir, 'multistack.conf'))
    cloud_dir = join(conf_dir, 'clouds')
    clouds = list()
    for cloud_file in listdir(cloud_dir):
        # Keep only files whose last dot-separated component is 'conf'.
        if cloud_file.split('.')[-1] == 'conf':
            clouds.append(parse_cloud_conf(join(cloud_dir, cloud_file)))
    conf['clouds'] = clouds
    # BUGFIX: removed leftover debug `print conf['clouds']` (stray stdout
    # noise, and a py2-only statement).
    # Drop the previous configuration document before persisting the new one.
    multistack.main.mongo.db.conf.remove()
    flush_data_to_mongo('conf', conf)
def update_quota(data, cloud, operation):
    """Update the available quota of a cloud.

    @param data: Request object wrapping the job document ({'job': ...}).
    @type data: dict
    @param cloud: Cloud object whose quota is adjusted in place.
    @type cloud: dict
    @param operation: One of 'add'/'create' (consume quota) or
        'delete'/'remove' (release quota).
    @type operation: string
    """
    # BUGFIX: `operation == ('delete' or 'remove')` evaluated to
    # `operation == 'delete'` (`or` returns its first truthy operand), so
    # 'remove' and 'create' never touched the quota. Use membership tests.
    if operation in ('delete', 'remove'):
        ram, vcpus, instances = calculate_usage(cloud, data)
        cloud['quota']['available']['ram'] += ram
        cloud['quota']['available']['vcpus'] += vcpus
        cloud['quota']['available']['instances'] += instances
    elif operation in ('add', 'create'):
        ram, vcpus, instances = calculate_usage(cloud, data)
        cloud['quota']['available']['ram'] -= ram
        cloud['quota']['available']['vcpus'] -= vcpus
        cloud['quota']['available']['instances'] -= instances
    # Write the updated cloud object back into the stored configuration.
    conf = config.read_conf()
    for i in range(0, len(conf['clouds'])):
        if conf['clouds'][i]['id'] == cloud['id']:
            conf['clouds'][i] = cloud
    db.flush_data_to_mongo('conf', conf)
def update_quota(data, cloud, operation):
    """Update the available quota of a cloud.

    @param data: Request object wrapping the job document ({'job': ...}).
    @type data: dict
    @param cloud: Cloud object whose quota is adjusted in place.
    @type cloud: dict
    @param operation: One of 'add'/'create' (consume quota) or
        'delete'/'remove' (release quota).
    @type operation: string
    """
    # BUGFIX: `operation == ('delete' or 'remove')` evaluated to
    # `operation == 'delete'` (`or` returns its first truthy operand), so
    # 'remove' and 'create' never touched the quota. Use membership tests.
    if operation in ('delete', 'remove'):
        ram, vcpus, instances = calculate_usage(cloud, data)
        cloud['quota']['available']['ram'] += ram
        cloud['quota']['available']['vcpus'] += vcpus
        cloud['quota']['available']['instances'] += instances
    elif operation in ('add', 'create'):
        ram, vcpus, instances = calculate_usage(cloud, data)
        cloud['quota']['available']['ram'] -= ram
        cloud['quota']['available']['vcpus'] -= vcpus
        cloud['quota']['available']['instances'] -= instances
    # Write the updated cloud object back into the stored configuration.
    conf = config.read_conf()
    for i in range(0, len(conf['clouds'])):
        if conf['clouds'][i]['id'] == cloud['id']:
            conf['clouds'][i] = cloud
    db.flush_data_to_mongo('conf', conf)
def remove_nodes(data, cloud, job_id):
    """
    Removes Nodes from a running cluster and updates the Job object.

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict

    @param job_id: Job ID
    @type job_id: string
    """
    job_db_item = multistack.main.mongo.db.job.find_one(
        {"_id": objectid.ObjectId(job_id)})
    job_obj = job_db_item['job']
    job_name = job_obj['name']
    initiate_cloud(job_obj['cloud'], job_name, cloud['auth'])
    for slave in data['slaves']:
        # BUGFIX: iterating job_obj['nodes'] while calling .remove() on it
        # skipped the element following each removal; iterate a snapshot.
        for node in list(job_obj['nodes']):
            if slave['flavor'] == node['flavor'] and node['role'] != 'master':
                current_app.cloud.terminate_instances(node['id'].split())
                slave['instances'] -= 1
                job_obj['nodes'].remove(node)
                if slave['instances'] == 0:
                    break
        # Decrementing the number of slaves in job object
        # NOTE(review): decrements by 1 per flavor although several nodes may
        # have been terminated above — mirrors add_nodes; confirm intended.
        for count in range(0, len(job_obj['slaves'])):
            if slave['flavor'] == job_obj['slaves'][count]['flavor']:
                job_obj['slaves'][count]['instances'] -= 1
    job_db_item['job'] = job_obj
    flush_data_to_mongo('job', job_db_item)
def remove_nodes(data, cloud, job_id):
    """
    Removes Nodes from a running cluster and updates the Job object.

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict

    @param job_id: Job ID
    @type job_id: string
    """
    job_db_item = multistack.main.mongo.db.job.find_one(
        {"_id": objectid.ObjectId(job_id)})
    job_obj = job_db_item['job']
    job_name = job_obj['name']
    initiate_cloud(job_obj['cloud'], job_name, cloud['auth'])
    for slave in data['slaves']:
        # BUGFIX: iterating job_obj['nodes'] while calling .remove() on it
        # skipped the element following each removal; iterate a snapshot.
        for node in list(job_obj['nodes']):
            if slave['flavor'] == node['flavor'] and node['role'] != 'master':
                current_app.cloud.terminate_instances(node['id'].split())
                slave['instances'] -= 1
                job_obj['nodes'].remove(node)
                if slave['instances'] == 0:
                    break
        # Decrementing the number of slaves in job object
        # NOTE(review): decrements by 1 per flavor although several nodes may
        # have been terminated above — mirrors add_nodes; confirm intended.
        for count in range(0, len(job_obj['slaves'])):
            if slave['flavor'] == job_obj['slaves'][count]['flavor']:
                job_obj['slaves'][count]['instances'] -= 1
    job_db_item['job'] = job_obj
    flush_data_to_mongo('job', job_db_item)
def create(data):
    """Validate and persist a new job document, then schedule its creation.

    @param data: Job request document; gains 'job.id' once inserted.
    @type data: dict
    @return: Flask response — the validation error response on invalid input,
        202 with the job id on success, or 500 if scheduling fails.
    """
    # Validation: validate() returns True, or an error response to relay.
    validation_result = validate(data)
    if validation_result is not True:
        return validation_result
    create_ret = dict()
    multistack.main.mongo.db.job.insert(data)
    # Record the stringified ObjectId inside the job document itself.
    id_t = str(data['_id'])
    data['job']['id'] = id_t
    flush_data_to_mongo('job', data)
    set_prefixed_format(id_t)
    if schedule(data, 'create'):
        create_ret['job_id'] = id_t
        return make_response(jsonify(**create_ret), 202)
    # Scheduling failed (no cloud with quota, etc.).
    # BUGFIX: dropped the unreachable trailing `return` after this if/else.
    create_ret['error'] = "job_init_failed"
    return make_response(jsonify(**create_ret), 500)
def create(data):
    """Validate and persist a new job document, then schedule its creation.

    @param data: Job request document; gains 'job.id' once inserted.
    @type data: dict
    @return: Flask response — the validation error response on invalid input,
        202 with the job id on success, or 500 if scheduling fails.
    """
    # Validation: validate() returns True, or an error response to relay.
    validation_result = validate(data)
    if validation_result is not True:
        return validation_result
    create_ret = dict()
    multistack.main.mongo.db.job.insert(data)
    # Record the stringified ObjectId inside the job document itself.
    id_t = str(data['_id'])
    data['job']['id'] = id_t
    flush_data_to_mongo('job', data)
    set_prefixed_format(id_t)
    if schedule(data, 'create'):
        create_ret['job_id'] = id_t
        return make_response(jsonify(**create_ret), 202)
    # Scheduling failed (no cloud with quota, etc.).
    # BUGFIX: dropped the unreachable trailing `return` after this if/else.
    create_ret['error'] = "job_init_failed"
    return make_response(jsonify(**create_ret), 500)
def spawn(data, cloud):
    """
    Generates the keypair and creates security groups specific to the cluster
    and boots the instances.

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict
    """
    image_id = cloud['default_image_id']
    # Mark the job as spawning before any instance is requested.
    data['job']['nodes'] = []
    data['job']['status'] = 'spawning'
    flush_data_to_mongo('job', data)
    job_name = data['job']['name']
    keypair_name, sec_master, sec_slave = ec2.ec2_entities(job_name)
    conn = ec2.make_connection(cloud['auth'])
    ec2.create_keypair(conn, keypair_name)
    ec2.create_security_groups(conn, sec_master, sec_slave)
    # One master instance in the master security group.
    master = data['job']['master']
    res_master = ec2.boot_instances(conn, 1, keypair_name, [sec_master],
                                    flavor=master['flavor'],
                                    image_id=image_id)
    data['job']['nodes'] += get_node_objects(conn, "master", res_master.id)
    flush_data_to_mongo('job', data)
    # Then each batch of slaves, flushing to mongo after every boot call.
    for slave in data['job']['slaves']:
        res_slave = ec2.boot_instances(conn, slave['instances'], keypair_name,
                                       [sec_slave],
                                       flavor=slave['flavor'],
                                       image_id=image_id)
        data['job']['nodes'] += get_node_objects(conn, "slave", res_slave.id)
        flush_data_to_mongo('job', data)
    return
def spawn(data, cloud):
    """
    Generates the keypair and creates security groups specific to the cluster
    and boots the instances.

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict
    """
    image_id = cloud['default_image_id']
    # Mark the job as spawning before any instance is requested.
    data['job']['nodes'] = []
    data['job']['status'] = 'spawning'
    flush_data_to_mongo('job', data)
    # Renamed local (was `cloud = current_app.cloud`, which shadowed the
    # `cloud` parameter read above).
    driver = current_app.cloud
    driver.create_keypair(driver.keypair)
    driver.create_security_groups(driver.master_security_group,
                                  driver.slave_security_group)
    # One master instance in the master security group.
    master = data['job']['master']
    data['job']['nodes'] += driver.boot_instances(
        driver.master_name,
        1,
        driver.keypair,
        [driver.master_security_group],
        flavor=master['flavor'],
        image_id=image_id)
    flush_data_to_mongo('job', data)
    # Then each batch of slaves, flushing to mongo after every boot call.
    for slave in data['job']['slaves']:
        data['job']['nodes'] += driver.boot_instances(
            driver.slave_name,
            slave['instances'],
            driver.keypair,
            [driver.slave_security_group],
            flavor=slave['flavor'],
            image_id=image_id)
        flush_data_to_mongo('job', data)
    return
def spawn(data, cloud):
    """
    Generates the keypair and creates security groups specific to the cluster
    and boots the instances.

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict
    """
    image_id = cloud['default_image_id']
    # Reset the node list and record the transitional status before any
    # instance is requested.
    data['job']['nodes'] = []
    data['job']['status'] = 'spawning'
    flush_data_to_mongo('job', data)
    job_name = data['job']['name']
    # Derive per-cluster entity names: keypair plus the two security groups.
    keypair_name, sec_master, sec_slave = ec2.ec2_entities(job_name)
    conn = ec2.make_connection(cloud['auth'])
    ec2.create_keypair(conn, keypair_name)
    ec2.create_security_groups(conn, sec_master, sec_slave)
    master = data['job']['master']
    # Boot exactly one master node in the master security group.
    res_master = ec2.boot_instances(conn, 1, keypair_name, [sec_master],
                                    flavor=master['flavor'],
                                    image_id=image_id)
    data['job']['nodes'] += get_node_objects(conn, "master", res_master.id)
    # Flush after each boot call so already-booted nodes stay recorded in
    # mongo even if a later boot fails.
    flush_data_to_mongo('job', data)
    for slave in data['job']['slaves']:
        res_slave = ec2.boot_instances(conn, slave['instances'], keypair_name,
                                       [sec_slave],
                                       flavor=slave['flavor'],
                                       image_id=image_id)
        data['job']['nodes'] += get_node_objects(conn, "slave", res_slave.id)
        flush_data_to_mongo('job', data)
    return
def spawn(data, cloud):
    """
    Generates the keypair and creates security groups specific to the cluster
    and boots the instances.

    @param data: The job document stored in mongo database.
    @type data: dict

    @param cloud: Cloud object containing information of a specific cloud provider.
    @type cloud: dict
    """
    image_id = cloud['default_image_id']
    # Mark the job as spawning before any instance is requested.
    data['job']['nodes'] = []
    data['job']['status'] = 'spawning'
    flush_data_to_mongo('job', data)
    # Renamed local (was `cloud = current_app.cloud`, which shadowed the
    # `cloud` parameter read above).
    driver = current_app.cloud
    driver.create_keypair(driver.keypair)
    driver.create_security_groups(driver.master_security_group,
                                  driver.slave_security_group)
    # One master instance in the master security group.
    master = data['job']['master']
    data['job']['nodes'] += driver.boot_instances(
        driver.master_name,
        1,
        driver.keypair,
        [driver.master_security_group],
        flavor=master['flavor'],
        image_id=image_id)
    flush_data_to_mongo('job', data)
    # Then each batch of slaves, flushing to mongo after every boot call.
    for slave in data['job']['slaves']:
        data['job']['nodes'] += driver.boot_instances(
            driver.slave_name,
            slave['instances'],
            driver.keypair,
            [driver.slave_security_group],
            flavor=slave['flavor'],
            image_id=image_id)
        flush_data_to_mongo('job', data)
    return
def schedule(data, operation):
    """Schedules based on certain filters.

    @param data: Job document ('create'/'delete') or node-change request
        carrying the job 'id' ('add'/'remove').
    @type data: dict
    @param operation: One of 'create', 'delete', 'add', 'remove'.
    @type operation: string
    @return: False when no cloud fits or the job lookup fails, True otherwise.
    """
    if operation == 'create':
        conf = config.read_conf()
        clouds = filter_quota(data, conf)
        if clouds == []:
            return False
        cloud = filter_priority(clouds)
        if cloud is None:
            return False
        data['job']['cloud'] = cloud['name']
        db.flush_data_to_mongo('job', data)
        update_quota(data, cloud, operation)
        Process(target=cluster.create,
                args=(data, cloud, conf['general'])).start()
    elif operation == 'delete':
        job_id = data['job']['id']
        conf = config.read_conf()
        for cloud in conf['clouds']:
            if cloud['name'] == data['job']['cloud']:
                break
        Process(target=cluster.delete, args=(job_id, cloud)).start()
        update_quota(data, cloud, operation)
    elif operation == 'add':
        job_id = data['id']
        # FIX: look the job up once instead of issuing the query twice.
        job_info = multistack.services.job.info(job_id)
        if not job_info[0]:
            return False
        job_obj = job_info[1]
        conf = config.read_conf()
        for cloud in conf['clouds']:
            if cloud['name'] == job_obj['job']['cloud']:
                break
        new_req_obj = dict()
        new_req_obj['job'] = data
        update_quota(new_req_obj, cloud, operation)
        Process(target=cluster.add_nodes,
                args=(data, cloud, job_id, conf['general'])).start()
    elif operation == 'remove':
        job_id = data['id']
        # FIX: look the job up once instead of issuing the query twice.
        job_info = multistack.services.job.info(job_id)
        if not job_info[0]:
            return False
        job_obj = job_info[1]
        conf = config.read_conf()
        for cloud in conf['clouds']:
            if cloud['name'] == job_obj['job']['cloud']:
                break
        new_req_obj = dict()
        new_req_obj['job'] = data
        Process(target=cluster.remove_nodes,
                args=(data, cloud, job_id)).start()
        update_quota(new_req_obj, cloud, operation)
    return True
def schedule(data, operation):
    """Schedules based on certain filters.

    @param data: Job document ('create'/'delete') or node-change request
        carrying the job 'id' ('add'/'remove').
    @type data: dict
    @param operation: One of 'create', 'delete', 'add', 'remove'.
    @type operation: string
    @return: False when no cloud fits or the job lookup fails, True otherwise.
    """
    if operation == 'create':
        conf = config.read_conf()
        clouds = filter_quota(data, conf)
        if clouds == []:
            return False
        cloud = filter_priority(clouds)
        if cloud is None:
            return False
        data['job']['cloud'] = cloud['name']
        db.flush_data_to_mongo('job', data)
        update_quota(data, cloud, operation)
        Process(target=cluster.create,
                args=(data, cloud, conf['general'])).start()
    elif operation == 'delete':
        job_id = data['job']['id']
        conf = config.read_conf()
        for cloud in conf['clouds']:
            if cloud['name'] == data['job']['cloud']:
                break
        Process(target=cluster.delete, args=(job_id, cloud)).start()
        update_quota(data, cloud, operation)
    elif operation == 'add':
        job_id = data['id']
        # FIX: look the job up once instead of issuing the query twice.
        job_info = multistack.services.job.info(job_id)
        if not job_info[0]:
            return False
        job_obj = job_info[1]
        conf = config.read_conf()
        for cloud in conf['clouds']:
            if cloud['name'] == job_obj['job']['cloud']:
                break
        new_req_obj = dict()
        new_req_obj['job'] = data
        update_quota(new_req_obj, cloud, operation)
        Process(target=cluster.add_nodes,
                args=(data, cloud, job_id, conf['general'])).start()
    elif operation == 'remove':
        job_id = data['id']
        # FIX: look the job up once instead of issuing the query twice.
        job_info = multistack.services.job.info(job_id)
        if not job_info[0]:
            return False
        job_obj = job_info[1]
        conf = config.read_conf()
        for cloud in conf['clouds']:
            if cloud['name'] == job_obj['job']['cloud']:
                break
        new_req_obj = dict()
        new_req_obj['job'] = data
        Process(target=cluster.remove_nodes,
                args=(data, cloud, job_id)).start()
        update_quota(new_req_obj, cloud, operation)
    return True