Example #1
def _dump_agents(tempdir):
    ctx.send_event('Preparing agents data')
    client = get_rest_client()
    broker_config = BootstrapContext(ctx.bootstrap_context).broker_config()
    defaults = {
        'version': str(_get_manager_version(client)),
        'broker_config': broker_config
    }
    result = {}
    for deployment in client.deployments.list():
        deployment_result = {}
        for node in client.nodes.list(deployment_id=deployment.id):
            if _is_compute(node):
                node_result = {}
                for node_instance in client.node_instances.list(
                        deployment_id=deployment.id, node_name=node.id):
                    overrides = {}
                    current = node_instance.runtime_properties.get(
                        'cloudify_agent', {})
                    for k, v in defaults.iteritems():
                        overrides[k] = current.get(k, v)
                    node_result[node_instance.id] = overrides
                deployment_result[node.id] = node_result
        result[deployment.id] = deployment_result
    with open(os.path.join(tempdir, _AGENTS_FILE), 'w') as out:
        out.write(json.dumps(result))
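The file written here is a nested mapping keyed by deployment ID, node ID, and node instance ID, where each leaf takes its values from the instance's cloudify_agent runtime properties, falling back to the defaults. A sketch of the resulting JSON, with placeholder identifiers:

{
    "deployment_1": {
        "vm": {
            "vm_a1b2c3": {
                "version": "3.3",
                "broker_config": {...}
            }
        }
    }
}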
Example #3
def _restore_influxdb_3_3(tempdir):
    ctx.send_event('Restoring InfluxDB metrics')
    influxdb_f = os.path.join(tempdir, _INFLUXDB)
    if os.path.exists(influxdb_f):
        rcode = subprocess.call(_INFLUXDB_RESTORE_CMD.format(influxdb_f),
                                shell=True)
        if rcode != 0:
            raise NonRecoverableError('Error during restoring InfluxDB data, '
                                      'error code: {0}'.format(rcode))
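Note that the restore command is built with str.format and executed through the shell. A minimal hardening sketch (not part of the original module), using the Python 2 stdlib pipes.quote so a temp path containing shell metacharacters cannot break the command:

import pipes

rcode = subprocess.call(
    _INFLUXDB_RESTORE_CMD.format(pipes.quote(influxdb_f)),  # quote the path before formatting
    shell=True)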
Example #6
def _dump_elasticsearch(tempdir, es, has_cloudify_events):
    ctx.send_event('Dumping elasticsearch data')
    storage_scan = elasticsearch.helpers.scan(es, index=_STORAGE_INDEX_NAME)
    storage_scan = _except_types(storage_scan, 'provider_context', 'snapshot')
    storage_scan = (e for e in storage_scan if e['_id'] != ctx.execution_id)

    event_scan = elasticsearch.helpers.scan(
        es, index=_EVENTS_INDEX_NAME if has_cloudify_events else 'logstash-*')

    with open(os.path.join(tempdir, _ELASTICSEARCH), 'w') as f:
        for item in itertools.chain(storage_scan, event_scan):
            f.write(json.dumps(item) + os.linesep)
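_except_types is not shown in these examples. Judging from its call site, a plausible implementation (an assumption, not the verified original) is a generator that drops documents by their Elasticsearch _type:

def _except_types(s, *types):
    # keep only documents whose _type is not in the excluded list
    return (e for e in s if e['_type'] not in types)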
Example #9
def restore(snapshot_id, recreate_deployments_envs, config, force, **kwargs):

    ctx.logger.info('Restoring snapshot {0}'.format(snapshot_id))

    config = _DictToAttributes(config)

    _assert_clean_elasticsearch(log_warning=force)

    tempdir = tempfile.mkdtemp('-snapshot-data')

    try:
        file_server_root = config.file_server_root
        snapshots_dir = os.path.join(file_server_root,
                                     config.file_server_snapshots_folder)

        snapshot_path = os.path.join(snapshots_dir, snapshot_id,
                                     '{0}.zip'.format(snapshot_id))

        with zipfile.ZipFile(snapshot_path, 'r') as zipf:
            zipf.extractall(tempdir)

        with open(os.path.join(tempdir, _METADATA_FILE), 'r') as f:
            metadata = json.load(f)

        client = get_rest_client()

        manager_version = _get_manager_version(client)
        from_version = ManagerVersion(metadata[_M_VERSION])

        ctx.logger.info('Manager version = {0}, snapshot version = {1}'.format(
            str(manager_version), str(from_version)))

        if from_version.greater_than(manager_version):
            raise NonRecoverableError(
                'Cannot restore a newer manager\'s snapshot on this manager '
                '[{0} > {1}]'.format(str(from_version), str(manager_version)))

        existing_deployments_ids = [d.id for d in client.deployments.list()]
        ctx.send_event(
            'Starting restoring snapshot of manager {0}'.format(from_version))

        _restore_snapshot(config, tempdir, metadata)

        if recreate_deployments_envs:
            recreate_deployments_environments(existing_deployments_ids)

        ctx.send_event('Successfully restored snapshot of manager {0}'.format(
            from_version))
    finally:
        shutil.rmtree(tempdir)
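_DictToAttributes is also not shown; it wraps the plain config dict so the rest of the code can use attribute access such as config.file_server_root. A minimal sketch, assuming straightforward key lookup:

class _DictToAttributes(object):
    def __init__(self, dic):
        self._dict = dic

    def __getattr__(self, name):
        # only invoked for attributes not found normally,
        # so _dict itself never recurses here
        return self._dict[name]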
Example #10
def _create(snapshot_id, config, include_metrics, include_credentials, **kw):
    tempdir = tempfile.mkdtemp('-snapshot-data')

    snapshots_dir = os.path.join(config.file_server_root,
                                 config.file_server_snapshots_folder)

    try:
        if not os.path.exists(snapshots_dir):
            os.makedirs(snapshots_dir)

        metadata = {}

        # files/dirs copy
        _copy_data(tempdir, config)

        # elasticsearch
        es = _create_es_client(config)
        has_cloudify_events = es.indices.exists(index=_EVENTS_INDEX_NAME)
        _dump_elasticsearch(tempdir, es, has_cloudify_events)

        metadata[_M_HAS_CLOUDIFY_EVENTS] = has_cloudify_events

        # influxdb
        if include_metrics:
            _dump_influxdb(tempdir)

        # credentials
        if include_credentials:
            _dump_credentials(tempdir)

        # version
        metadata[_M_VERSION] = str(_get_manager_version())

        # metadata
        with open(os.path.join(tempdir, _METADATA_FILE), 'w') as f:
            json.dump(metadata, f)

        # agents
        _dump_agents(tempdir)

        # zip
        ctx.send_event('Creating snapshot archive')
        snapshot_dir = os.path.join(snapshots_dir, snapshot_id)
        os.makedirs(snapshot_dir)

        shutil.make_archive(os.path.join(snapshot_dir, snapshot_id), 'zip',
                            tempdir)
        # end
    finally:
        shutil.rmtree(tempdir)
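Taken together, the steps above determine the snapshot archive's layout. A sketch using the file-name constants as they appear in the code (their actual string values are not shown in these examples):

<snapshot_id>.zip
    <copied files and dirs>    # _copy_data
    _ELASTICSEARCH             # storage and events documents, one JSON object per line
    _INFLUXDB                  # present only when include_metrics is set
    _CRED_DIR/                 # agent keys, present only when include_credentials is set
    _METADATA_FILE             # _M_VERSION and _M_HAS_CLOUDIFY_EVENTS
    _AGENTS_FILE               # written by _dump_agents (Example #1)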
Example #11
def _dump_influxdb(tempdir):
    ctx.send_event('Dumping InfluxDB data')
    influxdb_file = os.path.join(tempdir, _INFLUXDB)
    influxdb_temp_file = influxdb_file + '.temp'
    rcode = subprocess.call(_INFLUXDB_DUMP_CMD.format(influxdb_temp_file),
                            shell=True)
    if rcode != 0:
        raise NonRecoverableError('Error during dumping InfluxDB data, '
                                  'error code: {0}'.format(rcode))
    with open(influxdb_temp_file, 'r') as f, open(influxdb_file, 'w') as g:
        for obj in _get_json_objects(f):
            g.write(obj + os.linesep)

    os.remove(influxdb_temp_file)
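_get_json_objects is not shown either; the dump command apparently writes concatenated JSON objects, which this helper splits so they can be rewritten one per line. A sketch (an assumption, not the verified original) using the stdlib decoder; it reads the whole file into memory, which is acceptable for an illustration:

def _get_json_objects(f):
    decoder = json.JSONDecoder()
    text = f.read()
    pos = 0
    # skip any leading whitespace
    while pos < len(text) and text[pos].isspace():
        pos += 1
    while pos < len(text):
        # raw_decode returns the parsed object and the index just past it
        obj, end = decoder.raw_decode(text, pos)
        yield json.dumps(obj)
        pos = end
        while pos < len(text) and text[pos].isspace():
            pos += 1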
Example #13
def restore(snapshot_id, recreate_deployments_envs, config, force, **kwargs):
    mappings = {
        '3.3': _restore_snapshot_format_3_3,
        '3.2': _restore_snapshot_format_3_2
    }

    config = _DictToAttributes(config)

    _assert_clean_elasticsearch(log_warning=force)

    tempdir = tempfile.mkdtemp('-snapshot-data')

    try:
        file_server_root = config.file_server_root
        snapshots_dir = os.path.join(
            file_server_root,
            config.file_server_snapshots_folder
        )

        snapshot_path = os.path.join(snapshots_dir, snapshot_id, '{0}.zip'
                                     .format(snapshot_id))

        with zipfile.ZipFile(snapshot_path, 'r') as zipf:
            zipf.extractall(tempdir)

        with open(os.path.join(tempdir, _METADATA_FILE), 'r') as f:
            metadata = json.load(f)

        from_version = metadata[_M_VERSION]

        if from_version not in mappings:
            raise NonRecoverableError('Manager is not able to restore snapshot'
                                      ' of manager {0}'.format(from_version))
        client = get_rest_client()
        existing_deployments_ids = [d.id for d in client.deployments.list()]
        ctx.send_event('Starting restoring snapshot of manager {0}'
                       .format(from_version))
        mappings[from_version](config, tempdir, metadata)

        if recreate_deployments_envs:
            recreate_deployments_environments(existing_deployments_ids)

        ctx.send_event('Successfully restored snapshot of manager {0}'
                       .format(from_version))
    finally:
        shutil.rmtree(tempdir)
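Unlike the variant in Example #9, which compares ManagerVersion objects and accepts any snapshot no newer than the manager, this variant restores only the exact versions registered in mappings and fails fast otherwise. Supporting another format would mean registering one more handler, e.g. (hypothetical version and handler name):

mappings = {
    '3.3': _restore_snapshot_format_3_3,
    '3.2': _restore_snapshot_format_3_2,
    '3.4': _restore_snapshot_format_3_4,  # hypothetical
}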
Example #14
def _dump_credentials(tempdir):
    ctx.send_event('Dumping credentials data')
    archive_cred_path = os.path.join(tempdir, _CRED_DIR)
    os.makedirs(archive_cred_path)

    hosts = [(dep_id, node)
             for dep_id, wctx in ctx.deployments_contexts.iteritems()
             for node in wctx.nodes if _is_compute(node)]

    for dep_id, n in hosts:
        props = n.properties
        if 'cloudify_agent' in props and 'key' in props['cloudify_agent']:
            node_id = dep_id + '_' + n.id
            agent_key_path = props['cloudify_agent']['key']
            os.makedirs(os.path.join(archive_cred_path, node_id))
            shutil.copy(
                os.path.expanduser(agent_key_path),
                os.path.join(archive_cred_path, node_id, _CRED_KEY_NAME))
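_is_compute, used both here and in Example #1, is not shown. A plausible implementation (an assumption, not the verified original) checks the node's type hierarchy for the built-in compute type:

def _is_compute(node):
    # hosts are nodes derived from cloudify.nodes.Compute
    return 'cloudify.nodes.Compute' in node.type_hierarchy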
Example #17
def _restore_elasticsearch(tempdir, es, metadata):

    has_cloudify_events_index = es.indices.exists(index=_EVENTS_INDEX_NAME)
    snap_has_cloudify_events_index = metadata[_M_HAS_CLOUDIFY_EVENTS]

    # cloudify_events -> cloudify_events, logstash-* -> logstash-*
    def get_data_itr():
        for line in open(os.path.join(tempdir, _ELASTICSEARCH), 'r'):
            elem = json.loads(line)
            _update_es_node(elem)
            yield elem

    _check_conflicts(es, get_data_itr())

    # logstash-* -> cloudify_events
    def logstash_to_cloudify_events():
        for elem in get_data_itr():
            if elem['_index'].startswith('logstash-'):
                elem['_index'] = _EVENTS_INDEX_NAME
            yield elem

    def cloudify_events_to_logstash():
        d = datetime.now()
        index = 'logstash-{0}'.format(d.strftime('%Y.%m.%d'))
        for elem in get_data_itr():
            if elem['_index'] == _EVENTS_INDEX_NAME:
                elem['_index'] = index
            yield elem

    # choose iter
    if (has_cloudify_events_index and snap_has_cloudify_events_index) or\
            (not has_cloudify_events_index and
             not snap_has_cloudify_events_index):
        data_iter = get_data_itr()
    elif not snap_has_cloudify_events_index and has_cloudify_events_index:
        data_iter = logstash_to_cloudify_events()
    else:
        data_iter = cloudify_events_to_logstash()

    ctx.send_event('Restoring ElasticSearch data')
    elasticsearch.helpers.bulk(es, data_iter)
    es.indices.flush()
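Two details are worth spelling out. First, _check_conflicts is handed its own generator because a generator can be consumed only once; the chosen data_iter is built from a fresh call. Second, the iterator choice reduces to a two-by-two matrix over where events live on each side:

# snapshot events index    manager events index    chosen iterator
# cloudify_events          cloudify_events         get_data_itr (as-is)
# logstash-*               logstash-*              get_data_itr (as-is)
# logstash-*               cloudify_events         logstash_to_cloudify_events
# cloudify_events          logstash-*              cloudify_events_to_logstash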
Example #19
def recreate_deployments_environments(deployments_to_skip):
    rest_client = get_rest_client()
    for dep_id, dep_ctx in ctx.deployments_contexts.iteritems():
        if dep_id in deployments_to_skip:
            continue
        with dep_ctx:
            dep = rest_client.deployments.get(dep_id)
            blueprint = rest_client.blueprints.get(dep_ctx.blueprint.id)
            blueprint_plan = blueprint['plan']
            tasks_graph = generate_create_dep_tasks_graph(
                dep_ctx,
                deployment_plugins_to_install=blueprint_plan[
                    'deployment_plugins_to_install'],
                workflow_plugins_to_install=blueprint_plan[
                    'workflow_plugins_to_install'],
                policy_configuration={
                    'policy_types': dep['policy_types'],
                    'policy_triggers': dep['policy_triggers'],
                    'groups': dep['groups']
                })
            tasks_graph.execute()
            ctx.send_event('Successfully created deployment environment '
                           'for deployment {0}'.format(dep_id))
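Note how this fits the restore flows in Examples #9 and #13: existing_deployments_ids is collected before the snapshot data is loaded and passed here as deployments_to_skip, so environments are recreated only for deployments that arrived with the snapshot, never for ones that already existed on the target manager.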
Example #20
def _restore_credentials_3_3(tempdir, es):
    ctx.send_event('Restoring credentials')
    archive_cred_path = os.path.join(tempdir, _CRED_DIR)

    # in case when this is not the first restore action
    if os.path.exists(_RESTORED_CRED_DIR):
        shutil.rmtree(_RESTORED_CRED_DIR)

    os.makedirs(_RESTORED_CRED_DIR)

    update_actions = []
    if os.path.exists(archive_cred_path):
        for node_id in os.listdir(archive_cred_path):
            os.makedirs(os.path.join(_RESTORED_CRED_DIR, node_id))
            agent_key_path = os.path.join(_RESTORED_CRED_DIR, node_id,
                                          _CRED_KEY_NAME)
            shutil.copy(
                os.path.join(archive_cred_path, node_id, _CRED_KEY_NAME),
                agent_key_path)

            update_action = {
                '_op_type': 'update',
                '_index': _STORAGE_INDEX_NAME,
                '_type': 'node',
                '_id': node_id,
                'doc': {
                    'properties': {
                        'cloudify_agent': {
                            'key': agent_key_path
                        }
                    }
                }
            }

            update_actions.append(update_action)

    elasticsearch.helpers.bulk(es, update_actions)
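Each bulk action is a partial document update: Elasticsearch merges the supplied doc into the stored node, so only properties.cloudify_agent.key is overwritten and sibling properties survive. The effect is that a stored key path from the snapshotted manager, e.g. (hypothetical value):

{'properties': {'cloudify_agent': {'key': '~/.ssh/agent_key.pem'}}}

is rewritten to point at the copy placed under _RESTORED_CRED_DIR.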
Example #23
def _restore_agents_data(tempdir):
    ctx.send_event('Updating cloudify agent data')
    client = get_rest_client()
    with open(os.path.join(tempdir, _AGENTS_FILE)) as agents_file:
        agents = json.load(agents_file)
    insert_agents_data(client, agents)
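This closes the loop with Example #1: the agents file written by _dump_agents is parsed back and handed to insert_agents_data. A sketch of walking the parsed structure (same nesting as shown in Example #1; the logging call is illustrative only):

for deployment_id, nodes in agents.iteritems():
    for node_id, instances in nodes.iteritems():
        for instance_id, agent in instances.iteritems():
            # each leaf carries 'version' and 'broker_config'
            ctx.logger.debug('agent {0}: {1}'.format(instance_id, agent))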