Example #1
def main():
  """ This main function allows you to run the groomer manually. """
  zk_connection_locations = appscale_info.get_zk_locations_string()
  zookeeper = zk.ZKTransaction(host=zk_connection_locations, start_gc=False)
  db_info = appscale_info.get_db_info()
  table = db_info[':table']
  master = appscale_info.get_db_master_ip()
  datastore_path = "{0}:8888".format(master)
  ds_groomer = DatastoreGroomer(zookeeper, table, datastore_path)

  logging.debug("Trying to get groomer lock.")
  if ds_groomer.get_groomer_lock():
    logging.info("Got the groomer lock.")
    try:
      ds_groomer.run_groomer()
    except Exception as exception:
      logging.exception('Encountered exception {} while running the groomer.'
        .format(str(exception)))
    try:
      ds_groomer.zoo_keeper.release_lock_with_path(zk.DS_GROOM_LOCK_PATH)
    except (zk.ZKTransactionException, zk.ZKInternalException) as zk_exception:
      logging.error("Unable to release zk lock {0}.".format(str(zk_exception)))
Example #2
def wait_for_quorum(keyname, db_nodes, replication):
  """ Waits until enough Cassandra nodes are up for a quorum.

  Args:
    keyname: A string containing the deployment's keyname.
    db_nodes: An integer specifying the total number of DB nodes.
    replication: An integer specifying the keyspace replication factor.
  """
  command = cassandra_interface.NODE_TOOL + " " + 'status'
  key_file = '{}/{}.key'.format(utils.KEY_DIRECTORY, keyname)
  ssh_cmd = ['ssh', '-i', key_file, appscale_info.get_db_master_ip(), command]

  # Determine the number of nodes needed for a quorum.
  if db_nodes < 1 or replication < 1:
    raise dbconstants.AppScaleDBError('At least 1 database machine is needed.')
  if replication > db_nodes:
    raise dbconstants.AppScaleDBError(
      'The replication factor cannot exceed the number of database machines.')
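  # A quorum needs floor(replication / 2) + 1 live replicas, so up to
  # ceil(replication / 2.0 - 1) replicas can be down at once. For example,
  # db_nodes=3 and replication=3 give can_fail=1 and needed=2.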
  can_fail = math.ceil(replication/2.0 - 1)
  needed = int(db_nodes - can_fail)

  while True:
    output = subprocess.check_output(ssh_cmd)
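    # In `nodetool status` output, lines starting with 'UN' are nodes that
    # are Up and in the Normal state.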
    nodes_ready = len(
      [line for line in output.splitlines() if line.startswith('UN')])
    logging.info('{} nodes are up. {} are needed.'.format(nodes_ready, needed))
    if nodes_ready >= needed:
      break
    time.sleep(1)
Example #3
def main():
  """ This main function allows you to run the groomer manually. """
  zk_connection_locations = appscale_info.get_zk_locations_string()
  zookeeper = zk.ZKTransaction(host=zk_connection_locations)
  db_info = appscale_info.get_db_info()
  table = db_info[':table']
  master = appscale_info.get_db_master_ip()
  datastore_path = "{0}:8888".format(master)
  ds_groomer = DatastoreGroomer(zookeeper, table, datastore_path)

  logging.debug("Trying to get groomer lock.")
  if ds_groomer.get_groomer_lock():
    logging.info("Got the groomer lock.")
    try:
      ds_groomer.run_groomer()
    except Exception as exception:
      logging.exception('Encountered exception {} while running the groomer.'
        .format(str(exception)))
    try:
      ds_groomer.zoo_keeper.release_lock_with_path(zk.DS_GROOM_LOCK_PATH)
    except (zk.ZKTransactionException, zk.ZKInternalException) as zk_exception:
      logging.error("Unable to release zk lock {0}.".format(str(zk_exception)))
Example #4
def wait_for_quorum(keyname, db_nodes, replication):
    """ Waits until enough Cassandra nodes are up for a quorum.

  Args:
    keyname: A string containing the deployment's keyname.
    db_nodes: An integer specifying the total number of DB nodes.
    replication: An integer specifying the keyspace replication factor.
  """
    command = cassandra_interface.NODE_TOOL + " " + 'status'
    key_file = '{}/{}.key'.format(utils.KEY_DIRECTORY, keyname)
    ssh_cmd = [
        'ssh', '-i', key_file,
        appscale_info.get_db_master_ip(), command
    ]

    # Determine the number of nodes needed for a quorum.
    if db_nodes < 1 or replication < 1:
        raise dbconstants.AppScaleDBError(
            'At least 1 database machine is needed.')
    if replication > db_nodes:
        raise dbconstants.AppScaleDBError(
            'The replication factor cannot exceed the number of database machines.'
        )
    can_fail = math.ceil(replication / 2.0 - 1)
    needed = int(db_nodes - can_fail)

    while True:
        output = subprocess.check_output(ssh_cmd)
        nodes_ready = len(
            [line for line in output.splitlines() if line.startswith('UN')])
        logging.info('{} nodes are up. {} are needed.'.format(
            nodes_ready, needed))
        if nodes_ready >= needed:
            break
        time.sleep(1)
Example #5
def get_soap_accessor():
    """ Returns the SOAP server accessor to deal with application and users.

  Returns:
    A soap server accessor.
  """
    db_ip = appscale_info.get_db_master_ip()
    bindport = constants.UA_SERVER_PORT
    return SOAPpy.SOAPProxy("https://{0}:{1}".format(db_ip, bindport))
Example #6
def get_soap_accessor():
  """ Returns the SOAP server accessor to deal with application and users.

  Returns:
    A soap server accessor.
  """
  db_ip = appscale_info.get_db_master_ip()
  bindport = constants.UA_SERVER_PORT
  return SOAPpy.SOAPProxy("https://{0}:{1}".format(db_ip, bindport))
Example #7
def main():
  """ This main function allows you to run the groomer manually. """
  zk_connection_locations = appscale_info.get_zk_locations_string()
  zookeeper = zk.ZKTransaction(host=zk_connection_locations)
  db_info = appscale_info.get_db_info()
  table = db_info[':table']
  master = appscale_info.get_db_master_ip()
  datastore_path = "{0}:8888".format(master)
  ds_groomer = DatastoreGroomer(zookeeper, table, datastore_path)
  try:
    ds_groomer.run_groomer()
  finally:
    zookeeper.close()
Example #8
File: groomer.py Project: kleitz/appscale
def main():
    """ This main function allows you to run the groomer manually. """
    zk_connection_locations = appscale_info.get_zk_locations_string()
    zookeeper = zk.ZKTransaction(host=zk_connection_locations)
    db_info = appscale_info.get_db_info()
    table = db_info[':table']
    master = appscale_info.get_db_master_ip()
    datastore_path = "{0}:8888".format(master)
    ds_groomer = DatastoreGroomer(zookeeper, table, datastore_path)
    try:
        ds_groomer.run_groomer()
    finally:
        zookeeper.close()
Example #9
  def __init__(self):
    """ DistributedTaskQueue Constructor. """
    file_io.set_logging_format()
    file_io.mkdir(self.LOG_DIR)
    file_io.mkdir(TaskQueueConfig.CELERY_WORKER_DIR)
    file_io.mkdir(TaskQueueConfig.CELERY_CONFIG_DIR)

    setup_env()

    master_db_ip = appscale_info.get_db_master_ip()
    connection_str = master_db_ip + ":" + str(constants.DB_SERVER_PORT)
    ds_distrib = datastore_distributed.DatastoreDistributed(
      constants.DASHBOARD_APP_ID, connection_str, False, False)
    apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', ds_distrib)
    os.environ['APPLICATION_ID'] = constants.DASHBOARD_APP_ID
Example #10
def deploy_apps(app_paths):
    """ Deploys all apps that reside in /opt/appscale/apps.

  Args:
    app_paths: A list of the full paths of the apps to be deployed.
  Returns:
    True on success, False otherwise.
  """
    uaserver = SOAPpy.SOAPProxy('https://{0}:{1}'.format(
        appscale_info.get_db_master_ip(), UA_SERVER_PORT))

    acc = AppControllerClient(appscale_info.get_login_ip(),
                              appscale_info.get_secret())

    # Wait for Cassandra to come up after a restore.
    time.sleep(15)

    for app_path in app_paths:
        # Extract app ID.
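        # e.g. '/opt/appscale/apps/guestbook.tar.gz' yields 'guestbook'.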
        app_id = app_path[app_path.rfind('/') + 1:app_path.find('.')]
        if not app_id:
            logging.error(
                "Malformed source code archive. Cannot complete "
                "application recovery for '{}'. Aborting...".format(app_path))
            return False

        # Retrieve app admin via uaserver.
        app_data = uaserver.get_app_data(app_id, appscale_info.get_secret())

        app_admin_re = re.search("\napp_owner:(.+)\n", app_data)
        if app_admin_re:
            app_admin = app_admin_re.group(1)
        else:
            logging.error(
                "Missing application data. Cannot complete application "
                "recovery for '{}'. Aborting...".format(app_id))
            return False

        file_suffix = re.search(r"\.(.*)\Z", app_path).group(1)

        logging.warning(
            "Restoring app '{}', from '{}', with owner '{}'.".format(
                app_id, app_path, app_admin))

        acc.upload_app(app_path, file_suffix, app_admin)

    return True
Example #11
    def __init__(self):
        """ DistributedTaskQueue Constructor. """
        file_io.set_logging_format()
        file_io.mkdir(self.LOG_DIR)
        file_io.mkdir(TaskQueueConfig.CELERY_WORKER_DIR)
        file_io.mkdir(TaskQueueConfig.CELERY_CONFIG_DIR)

        setup_env()

        # Cache all queue information in memory.
        self.__queue_info_cache = {}

        master_db_ip = appscale_info.get_db_master_ip()
        connection_str = master_db_ip + ":" + str(constants.DB_SERVER_PORT)
        ds_distrib = datastore_distributed.DatastoreDistributed(
            constants.DASHBOARD_APP_ID, connection_str, require_indexes=False)
        apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', ds_distrib)
        os.environ['APPLICATION_ID'] = constants.DASHBOARD_APP_ID
Example #12
def deploy_apps(app_paths):
  """ Deploys all apps that reside in /opt/appscale/apps.

  Args:
    app_paths: A list of the full paths of the apps to be deployed.
  Returns:
    True on success, False otherwise.
  """
  uaserver = SOAPpy.SOAPProxy('https://{0}:{1}'.format(
    appscale_info.get_db_master_ip(), UA_SERVER_PORT))

  acc = AppControllerClient(appscale_info.get_login_ip(),
    appscale_info.get_secret())

  # Wait for Cassandra to come up after a restore.
  time.sleep(15)

  for app_path in app_paths:
    # Extract app ID.
    app_id = app_path[app_path.rfind('/')+1:app_path.find('.')]
    if not app_id:
      logging.error("Malformed source code archive. Cannot complete "
        "application recovery for '{}'. Aborting...".format(app_path))
      return False

    # Retrieve app admin via uaserver.
    app_data = uaserver.get_app_data(app_id, appscale_info.get_secret())

    app_admin_re = re.search("\napp_owner:(.+)\n", app_data)
    if app_admin_re:
      app_admin = app_admin_re.group(1)
    else:
      logging.error("Missing application data. Cannot complete application "
        "recovery for '{}'. Aborting...".format(app_id))
      return False

    file_suffix = re.search(r"\.(.*)\Z", app_path).group(1)

    logging.warning("Restoring app '{}', from '{}', with owner '{}'.".
      format(app_id, app_path, app_admin))

    acc.upload_app(app_path, file_suffix, app_admin)

  return True
Example #13
def get_node_info():
    """ Creates a list of JSON objects that contain node information and are
  needed to perform a backup/restore task on the current AppScale deployment.
  """

    # TODO
    # Add logic for choosing minimal set of nodes that need to perform a task.
    # e.g. Only the node that owns the entire keyspace.

    nodes = [{
        NodeInfoTags.HOST: get_br_service_url(appscale_info.get_db_master_ip()),
        NodeInfoTags.ROLE: 'db_master',
        NodeInfoTags.INDEX: None
    }]

    index = 0
    for node in appscale_info.get_db_slave_ips():
        host = get_br_service_url(node)
        # Make sure we don't send the same request on DB roles that reside on the
        # same node.
        if host not in nodes[0].values():
            nodes.append({
                NodeInfoTags.HOST: host,
                NodeInfoTags.ROLE: 'db_slave',
                NodeInfoTags.INDEX: index
            })
            index += 1

    index = 0
    for node in appscale_info.get_zk_node_ips():
        nodes.append({
            NodeInfoTags.HOST: get_br_service_url(node),
            NodeInfoTags.ROLE: 'zk',
            NodeInfoTags.INDEX: index
        })
        index += 1

    return nodes
Example #14
def deploy_sensor_app():
    """ Uploads the sensor app for registered deployments. """

    deployment_id = helper.get_deployment_id()
    # If deployment is not registered, then do nothing.
    if not deployment_id:
        return

    uaserver = SOAPpy.SOAPProxy('https://{0}:{1}'.format(
        appscale_info.get_db_master_ip(), hermes_constants.UA_SERVER_PORT))

    # If the appscalesensor app is already running, then do nothing.
    is_app_enabled = uaserver.is_app_enabled(hermes_constants.APPSCALE_SENSOR,
                                             appscale_info.get_secret())
    if is_app_enabled == "true":
        return

    pwd = appscale_utils.encrypt_password(
        hermes_constants.USER_EMAIL,
        appscale_utils.random_password_generator())
    if create_appscale_user(pwd, uaserver) and create_xmpp_user(pwd, uaserver):
        logging.debug("Created new user and now tarring app to be deployed.")
        file_path = os.path.join(os.path.dirname(__file__), '../Apps/sensor')
        app_dir_location = os.path.join(hermes_constants.APP_DIR_LOCATION,
                                        hermes_constants.APPSCALE_SENSOR)
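        # Mode "w|gz" writes the archive as a gzip-compressed stream.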
        archive = tarfile.open(app_dir_location, "w|gz")
        archive.add(file_path, arcname=hermes_constants.APPSCALE_SENSOR)
        archive.close()

        try:
            logging.info(
                "Deploying the sensor app for registered deployments.")
            acc = appscale_info.get_appcontroller_client()
            acc.upload_app(app_dir_location, hermes_constants.FILE_SUFFIX,
                           hermes_constants.USER_EMAIL)
        except AppControllerException:
            logging.exception("AppControllerException while trying to deploy "
                              "appscalesensor app.")
    else:
        logging.error("Error while creating or accessing the user to deploy "
                      "appscalesensor app.")
Example #15
  def __init__(self):
    """ DistributedTaskQueue Constructor. """
    file_io.set_logging_format()
    file_io.mkdir(self.LOG_DIR)
    file_io.mkdir(TaskQueueConfig.CELERY_WORKER_DIR)
    file_io.mkdir(TaskQueueConfig.CELERY_CONFIG_DIR)

    setup_env()
  
    # Cache all queue information in memory.
    self.__queue_info_cache = {}

    master_db_ip = appscale_info.get_db_master_ip()
    connection_str = master_db_ip + ":" + str(constants.DB_SERVER_PORT)
    ds_distrib = datastore_distributed.DatastoreDistributed(
      constants.DASHBOARD_APP_ID, connection_str, require_indexes=False)
    apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', ds_distrib)
    os.environ['APPLICATION_ID'] = constants.DASHBOARD_APP_ID

    # Flag to see if code needs to be reloaded.
    self.__force_reload = False
Example #16
File: hermes.py Project: eabyshev/appscale
def deploy_sensor_app():
  """ Uploads the sensor app for registered deployments. """

  deployment_id = helper.get_deployment_id()
  # If deployment is not registered, then do nothing.
  if not deployment_id:
    return

  uaserver = SOAPpy.SOAPProxy('https://{0}:{1}'.format(
    appscale_info.get_db_master_ip(), hermes_constants.UA_SERVER_PORT))

  # If the appscalesensor app is already running, then do nothing.
  is_app_enabled = uaserver.is_app_enabled(hermes_constants.APPSCALE_SENSOR,
    appscale_info.get_secret())
  if is_app_enabled == "true":
    return

  pwd = appscale_utils.encrypt_password(hermes_constants.USER_EMAIL,
    appscale_utils.random_password_generator())
  if create_appscale_user(pwd, uaserver) and create_xmpp_user(pwd, uaserver):
    logging.debug("Created new user and now tarring app to be deployed.")
    file_path = os.path.join(os.path.dirname(__file__), '../Apps/sensor')
    app_dir_location = os.path.join(hermes_constants.APP_DIR_LOCATION,
      hermes_constants.APPSCALE_SENSOR)
    archive = tarfile.open(app_dir_location, "w|gz")
    archive.add(file_path, arcname=hermes_constants.APPSCALE_SENSOR)
    archive.close()

    try:
      logging.info("Deploying the sensor app for registered deployments.")
      acc = appscale_info.get_appcontroller_client()
      acc.upload_app(app_dir_location, hermes_constants.FILE_SUFFIX,
        hermes_constants.USER_EMAIL)
    except AppControllerException:
      logging.exception("AppControllerException while trying to deploy "
        "appscalesensor app.")
  else:
    logging.error("Error while creating or accessing the user to deploy "
      "appscalesensor app.")
Example #17
def get_node_info():
  """ Creates a list of JSON objects that contain node information and are
  needed to perform a backup/restore task on the current AppScale deployment.
  """

  # TODO
  # Add logic for choosing minimal set of nodes that need to perform a task.
  # e.g. Only the node that owns the entire keyspace.

  nodes = [{
    NodeInfoTags.HOST: get_br_service_url(appscale_info.get_db_master_ip()),
    NodeInfoTags.ROLE: 'db_master',
    NodeInfoTags.INDEX: None
  }]

  index = 0
  for node in appscale_info.get_db_slave_ips():
    host = get_br_service_url(node)
    # Make sure we don't send the same request on DB roles that reside on the
    # same node.
    if host not in nodes[0].values():
      nodes.append({
        NodeInfoTags.HOST: host,
        NodeInfoTags.ROLE: 'db_slave',
        NodeInfoTags.INDEX: index
      })
      index += 1

  index = 0
  for node in appscale_info.get_zk_node_ips():
    nodes.append({
      NodeInfoTags.HOST: get_br_service_url(node),
      NodeInfoTags.ROLE: 'zk',
      NodeInfoTags.INDEX: index
    })
    index += 1

  return nodes
Example #18
import os
import sys

# TaskQueueConfig and appscale_info are assumed to be provided by the
# surrounding AppScale template.
from celery import Celery
from celery.utils.log import get_task_logger

import constants

from google.appengine.runtime import apiproxy_errors
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_distributed
from google.appengine.api import datastore
from google.appengine.ext import db


sys.path.append(TaskQueueConfig.CELERY_CONFIG_DIR)
sys.path.append(TaskQueueConfig.CELERY_WORKER_DIR)

app_id = "APP_ID"

config = TaskQueueConfig(TaskQueueConfig.RABBITMQ, app_id)
module_name = TaskQueueConfig.get_celery_worker_module_name(app_id)
celery = Celery(module_name, broker=config.get_broker_string(), backend="amqp")

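# Given a string, config_from_object imports the named module and uses its
# attributes as the Celery configuration.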
celery.config_from_object("CELERY_CONFIGURATION")

logger = get_task_logger(__name__)

master_db_ip = appscale_info.get_db_master_ip()
connection_str = master_db_ip + ":" + str(constants.DB_SERVER_PORT)
ds_distrib = datastore_distributed.DatastoreDistributed(
  "appscaledashboard", connection_str, require_indexes=False)
apiproxy_stub_map.apiproxy.RegisterStub("datastore_v3", ds_distrib)
os.environ["APPLICATION_ID"] = "appscaledashboard"

# This template header and tasks can be found in appscale/AppTaskQueue/templates
Example #19
import os
import sys

import constants

# TaskQueueConfig and appscale_info are assumed to be provided by the
# surrounding AppScale template.
from celery import Celery
from celery.utils.log import get_task_logger

from google.appengine.runtime import apiproxy_errors
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_distributed
from google.appengine.api import datastore
from google.appengine.ext import db

sys.path.append(TaskQueueConfig.CELERY_CONFIG_DIR)
sys.path.append(TaskQueueConfig.CELERY_WORKER_DIR)

app_id = 'APP_ID'

config = TaskQueueConfig(TaskQueueConfig.RABBITMQ, app_id)
module_name = TaskQueueConfig.get_celery_worker_module_name(app_id)
celery = Celery(module_name, broker=config.get_broker_string(), backend='amqp')

celery.config_from_object('CELERY_CONFIGURATION')

logger = get_task_logger(__name__)

master_db_ip = appscale_info.get_db_master_ip()
connection_str = master_db_ip + ":" + str(constants.DB_SERVER_PORT)
ds_distrib = datastore_distributed.DatastoreDistributed("appscaledashboard",
                                                        connection_str,
                                                        require_indexes=False)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', ds_distrib)
os.environ['APPLICATION_ID'] = "appscaledashboard"

# This template header and tasks can be found in appscale/AppTaskQueue/templates