def test_remove_uuid_does_not_exist(cartridge_cmd,
                                    clusterwide_conf_non_existent_instance,
                                    tmpdir):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    clusterwide_conf = clusterwide_conf_non_existent_instance

    instances = ['instance-1', 'instance-2']
    write_instances_topology_conf(data_dir, APPNAME, clusterwide_conf.conf,
                                  instances)

    cmd = [
        cartridge_cmd,
        'repair',
        'remove-instance',
        '--name',
        APPNAME,
        '--data-dir',
        data_dir,
        clusterwide_conf.instance_uuid,
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1

    exp_error = "Instance %s isn't found in cluster" % clusterwide_conf.instance_uuid
    assert_for_instances_group(get_logs(output), instances,
                               lambda line: exp_error in line)
Example #2
File: web.py  Project: Ryex/i2c-alarmpy
def logs():
    if utils.needs_user():
        return flask.redirect(flask.url_for('setup'))
    if ('logged_in' not in flask.session) or (not flask.session['logged_in']):
        return flask.redirect(flask.url_for('login'))
    logs = utils.get_logs()
    return flask.render_template('logs.j2', mode="logs", logs=logs)
def test_bad_args(cartridge_cmd, conf_type, tmpdir,
                  clusterwide_conf_non_existent_instance,
                  clusterwide_conf_srv_expelled):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    configs = {
        'non-existent-srv': clusterwide_conf_non_existent_instance,
        'srv-expelled': clusterwide_conf_srv_expelled,
    }

    config = configs[conf_type]

    instances = ['instance-1', 'instance-2']
    write_instances_topology_conf(data_dir, APPNAME, config.conf, instances)

    cmd = [
        cartridge_cmd, 'repair', 'set-advertise-uri', '--name', APPNAME,
        '--data-dir', data_dir, config.instance_uuid, 'new-uri:666'
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1

    exp_errors = {
        'non-existent-srv':
        "Instance %s isn't found in cluster" % config.instance_uuid,
        'srv-expelled': "Instance %s is expelled" % config.instance_uuid,
    }

    exp_error = exp_errors[conf_type]
    assert_for_instances_group(get_logs(output), instances,
                               lambda line: exp_error in line)
Example #4
def test_non_bootstrapped_instance(cartridge_cmd, clusterwide_conf_simple,
                                   repair_cmd, tmpdir):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    config = clusterwide_conf_simple

    if repair_cmd == 'set-advertise-uri':
        args = [config.instance_uuid, config.instance_uri]
    elif repair_cmd == 'remove-instance':
        args = [config.instance_uuid]
    elif repair_cmd == 'set-leader':
        args = [config.replicaset_uuid, config.instance_uuid]

    cmd = [
        cartridge_cmd,
        'repair',
        repair_cmd,
        '--name',
        APPNAME,
        '--data-dir',
        data_dir,
    ]
    cmd.extend(args)

    instances = ['instance-1', 'instance-2']

    # no cluster-wide configs

    # create empty work dirs for each instance
    for instance in instances:
        work_dir = os.path.join(data_dir, '%s.%s' % (APPNAME, instance))
        os.makedirs(work_dir)

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert "No cluster-wide configs found in %s" % data_dir in output

    # write config for instance-1
    write_instances_topology_conf(data_dir, APPNAME,
                                  clusterwide_conf_simple.conf, instances[:1])

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    if repair_cmd == 'set-advertise-uri':
        first_log_line = "Set %s advertise URI to %s" % (args[0], args[1])
    elif repair_cmd == 'remove-instance':
        first_log_line = "Remove instance with UUID %s" % args[0]
    elif repair_cmd == 'set-leader':
        first_log_line = "Set %s leader to %s" % (args[0], args[1])

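    # only instance-1 had a config written, so only it is processed and reported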
    logs = get_logs(output)
    assert len(logs) == 6
    assert logs[0] == first_log_line
    assert logs[1] == "Process application cluster-wide configurations..."
    assert logs[2] == "%s... OK" % instances[0]
    assert logs[3] == "Write application cluster-wide configurations..."
    assert logs[4] == "To reload cluster-wide configurations use --reload flag"
    assert logs[5] == "%s... OK" % instances[0]
Example #5
    def get(self, token):
        token_info = self.ensure_token_exists(token)
        converter_addr = token_info['converter']
        converter = BancorConverter(converter_addr)
        dai_converter = BancorConverter(DAI_CONVERTER_ADDRESS)
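        # token price in the base token = base reserve (BNT or USDB) / token reserve, adjusted for decimals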
        bnt_balance = converter.token_balance(BNT_ADDRESS if token_info['base_token'] == 'bnt' else USDB_ADDRESS)
        token_address = converter.token_address()
        token_decimals = ERC20(token_address).decimals()
        token_balance = converter.token_balance(token_address)
        token_price_in_bnt = bnt_balance / token_balance / 10 ** (18 - token_decimals)
        dai_price_in_bnt = dai_converter.price(DAI_ADDRESS)
        token_price_in_dai = token_price_in_bnt / dai_price_in_bnt

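        # sum Conversion events over roughly the last day of blocks to get 24h volume in the base token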
        current_block = w3.eth.blockNumber
        logs = get_logs(converter_addr, [EVENT_CONVERSION], current_block - BLOCKS_PER_DAY, current_block)
        volume = 0
        for log in logs:
            event = converter.parse_event('Conversion', log)
            if event['args']['_fromToken'] == BNT_ADDRESS:
                volume += event['args']['_amount']
            else:
                volume += event['args']['_return'] + event['args']['_conversionFee']

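        # raw on-chain amounts are scaled down by the relevant token decimals for the response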
        return {
            'base_token_balance': bnt_balance / 10 ** 18,
            'token_balance': token_balance / 10 ** token_decimals,
            'token_price_in_base_token': token_price_in_bnt,
            'token_price_in_usd': token_price_in_dai,
            '24h_volume_in_base_token': volume / 10 ** 18,
            '24h_volume_in_usd': volume / 10 ** 18 / dai_price_in_bnt
        }
def test_set_uri(cartridge_cmd, conf_type, tmpdir, clusterwide_conf_simple,
                 clusterwide_conf_srv_disabled, clusterwide_conf_one_file):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    NEW_URI = 'new-uri:666'

    configs = {
        'simple': clusterwide_conf_simple,
        'srv-disabled': clusterwide_conf_srv_disabled,
        'one-file-config': clusterwide_conf_one_file,
    }

    config = configs[conf_type]
    old_conf = config.conf

    # create app configs
    instances = ['instance-1', 'instance-2']
    conf_paths = write_instances_topology_conf(data_dir, APPNAME, old_conf,
                                               instances, config.one_file)

    # create other app configs
    other_instances = ['other-instance-1', 'other-instance-2']
    other_app_conf_paths = write_instances_topology_conf(
        data_dir,
        OTHER_APP_NAME,
        old_conf,
        other_instances,
        config.one_file,
    )

    cmd = [
        cartridge_cmd,
        'repair',
        'set-advertise-uri',
        '--name',
        APPNAME,
        '--data-dir',
        data_dir,
        config.instance_uuid,
        NEW_URI,
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    # check logs
    logs = get_logs(output)
    assert logs[0] == "Set %s advertise URI to %s" % (config.instance_uuid,
                                                      NEW_URI)

    instances_logs = logs[-len(instances):]
    assert_ok_for_all_instances(instances_logs, instances)

    # check app config changes
    new_conf = get_conf_with_new_uri(old_conf, config.instance_uuid, NEW_URI)
    assert_conf_changed(conf_paths, other_app_conf_paths, old_conf, new_conf)
def test_bad_args(cartridge_cmd, conf_type, tmpdir,
                  clusterwide_conf_non_existent_instance,
                  clusterwide_conf_non_existent_rpl,
                  clusterwide_conf_srv_disabled, clusterwide_conf_srv_expelled,
                  clusterwide_conf_srv_from_other_rpl):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    configs = {
        'non-existent-srv': clusterwide_conf_non_existent_instance,
        'non-existent-rpl': clusterwide_conf_non_existent_rpl,
        'srv-disabled': clusterwide_conf_srv_disabled,
        'srv-expelled': clusterwide_conf_srv_expelled,
        'srv-from-other-rpl': clusterwide_conf_srv_from_other_rpl,
    }

    config = configs[conf_type]

    instances = ['instance-1', 'instance-2']
    write_instances_topology_conf(data_dir, APPNAME, config.conf, instances)

    cmd = [
        cartridge_cmd,
        'repair',
        'set-leader',
        '--name',
        APPNAME,
        '--data-dir',
        data_dir,
        config.replicaset_uuid,
        config.instance_uuid,
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1

    exp_errors = {
        'non-existent-srv':
        "Instance %s isn't found in cluster" % config.instance_uuid,
        'non-existent-rpl':
        "Replicaset %s isn't found in the cluster" % config.replicaset_uuid,
        'srv-disabled':
        "Instance %s is disabled" % config.instance_uuid,
        'srv-expelled':
        "Instance %s is expelled" % config.instance_uuid,
        'srv-from-other-rpl':
        "Instance %s doesn't belong to replicaset %s" %
        (config.instance_uuid, config.replicaset_uuid),
    }

    exp_error = exp_errors[conf_type]
    assert_for_instances_group(get_logs(output), instances,
                               lambda line: exp_error in line)
def test_remove(
        cartridge_cmd, conf_type, tmpdir, clusterwide_conf_simple,
        clusterwide_conf_srv_disabled, clusterwide_conf_srv_expelled,
        clusterwide_conf_srv_not_in_leaders, clusterwide_conf_non_existent_rpl,
        clusterwide_conf_srv_last_in_rpl, clusterwide_conf_srv_last_in_leaders,
        clusterwide_conf_current_leader_is_string, clusterwide_conf_one_file):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    configs = {
        'simple': clusterwide_conf_simple,
        'disabled': clusterwide_conf_srv_disabled,
        'expelled': clusterwide_conf_srv_expelled,
        'not-in-leaders': clusterwide_conf_srv_not_in_leaders,
        'non-existent-rpl': clusterwide_conf_non_existent_rpl,
        'srv-last-in-rpl': clusterwide_conf_srv_last_in_rpl,
        'srv-last-in-leaders': clusterwide_conf_srv_last_in_leaders,
        'leader-is-string': clusterwide_conf_current_leader_is_string,
        'one-file-config': clusterwide_conf_one_file,
    }

    config = configs[conf_type]
    old_conf = copy.deepcopy(config.conf)
    instance_uuid = config.instance_uuid

    # create app configs
    instances = ['instance-1', 'instance-2']
    conf_paths = write_instances_topology_conf(data_dir, APPNAME, old_conf,
                                               instances, config.one_file)

    # create other app configs
    other_instances = ['other-instance-1', 'other-instance-2']
    other_app_conf_paths = write_instances_topology_conf(
        data_dir,
        OTHER_APP_NAME,
        old_conf,
        other_instances,
        config.one_file,
    )

    cmd = [
        cartridge_cmd,
        'repair',
        'remove-instance',
        '--name',
        APPNAME,
        '--data-dir',
        data_dir,
        instance_uuid,
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    # check logs
    logs = get_logs(output)
    assert logs[0] == "Remove instance with UUID %s" % config.instance_uuid

    instances_logs = logs[-len(instances):]
    assert_ok_for_all_instances(instances_logs, instances)

    # check config changes
    new_conf = get_conf_with_removed_instance(old_conf, config.instance_uuid)
    assert_conf_changed(conf_paths, other_app_conf_paths, old_conf, new_conf)
def test_set_leader(cartridge_cmd, conf_type, tmpdir,
                    clusterwide_conf_simple,
                    clusterwide_conf_srv_not_in_leaders,
                    clusterwide_conf_other_leader_is_string,
                    clusterwide_conf_one_file):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    configs = {
        'simple': clusterwide_conf_simple,
        'not-in-leaders': clusterwide_conf_srv_not_in_leaders,
        'leader-is-string': clusterwide_conf_other_leader_is_string,
        'one-file-config': clusterwide_conf_one_file,
    }

    config = configs[conf_type]
    old_conf = copy.deepcopy(config.conf)

    # create app configs
    instances = ['instance-1', 'instance-2']
    conf_paths = write_instances_topology_conf(data_dir, APPNAME, old_conf, instances, config.one_file)

    # create other app configs
    other_instances = ['other-instance-1', 'other-instance-2']
    other_app_conf_paths = write_instances_topology_conf(
        data_dir, OTHER_APP_NAME, old_conf, other_instances, config.one_file,
    )

    cmd = [
        cartridge_cmd, 'repair', 'set-leader',
        '--name', APPNAME,
        '--data-dir', data_dir,
        config.replicaset_uuid, config.instance_uuid,
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    # check logs
    logs = get_logs(output)
    assert logs[0] == "Set %s leader to %s" % (config.replicaset_uuid, config.instance_uuid)

    instances_logs = logs[-len(instances):]
    assert_ok_for_all_instances(instances_logs, instances)

    # check app config changes
    new_conf = copy.deepcopy(old_conf)

    # apply expected changes to topology conf
    new_topology_conf = new_conf
    if config.one_file:
        new_topology_conf = new_conf['topology']

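    # the chosen instance should end up first in the replicaset's leader (master) list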
    new_leaders = new_topology_conf['replicasets'][config.replicaset_uuid]['master']
    if isinstance(new_leaders, list):
        if config.instance_uuid in new_leaders:
            new_leaders.remove(config.instance_uuid)

        new_leaders.insert(0, config.instance_uuid)
    else:
        new_topology_conf['replicasets'][config.replicaset_uuid]['master'] = config.instance_uuid

    assert_conf_changed(conf_paths, other_app_conf_paths, old_conf, new_conf)
Example #11
def webhook():
    if request.method == 'POST':
        r = request.get_json()
        if r:
            if 'document' in r['message']:
                file_id = r['message']['document']['file_id']
                chat_id = r['message']['chat']['id']

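                # a document was uploaded: fetch it and try to reply with the extracted feature names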
                check, get_file_message = get_file_contetn(file_id)
                if check:
                    message, file_path, check = get_feature_names(file_id)
                    if check:
                        my_lg(content_type='sending_a_file',
                              chat_id=chat_id,
                              logger_content='file path: {}'.format(file_path))
                        # send the extracted file, closing it once sent
                        with open(file_path, 'rb') as doc:
                            my_lg(content_type='sending_a_file',
                                  chat_id=chat_id,
                                  logger_content='file opened')
                            bot.send_document(chat_id, doc)
                        my_lg(content_type='sending_a_file',
                              chat_id=None,
                              logger_content='file sent')
                        my_lg(content_type='sending_a_file',
                              chat_id=None,
                              logger_content=message)
                    else:
                        send_message(chat_id, text=message)
                        return jsonify(r), 200

                else:
                    send_message(chat_id, text=get_file_message)
                    my_lg(content_type='getting_a_file',
                          chat_id=None,
                          logger_content=get_file_message)

            elif r['message']['text'] in ('logs', 'Logs'):
                message = get_logs()
                chat_id = r['message']['chat']['id']
                send_message(chat_id, text=message)

            elif r['message']['text'] in ('info', 'Info'):
                chat_id = r['message']['chat']['id']
                send_message(chat_id, text=config.info)

            else:
                my_lg(content_type='messages_incoming',
                      chat_id=None,
                      logger_content=r)
                chat_id = r['message']['chat']['id']
                message = r['message']['text']
                send_message(
                    chat_id,
                    text='"{}" is not a valid command, type "info" to get more information'.format(message))

        return jsonify(r), 200
    else:
        my_lg(content_type='telegram response',
              chat_id=None,
              logger_content=None)
        return 'ok', 200
def test_force_list_topology(cartridge_cmd, tmpdir, clusterwide_conf_simple_v1,
                             clusterwide_conf_simple_v2):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    config1 = clusterwide_conf_simple_v1
    config2 = clusterwide_conf_simple_v2

    # create app configs
    conf1_instances = ['instance-1', 'instance-2']
    conf1_paths = write_instances_topology_conf(data_dir, APPNAME,
                                                config1.conf, conf1_instances)

    conf2_instances = ['instance-3', 'instance-4']
    conf2_paths = write_instances_topology_conf(data_dir, APPNAME,
                                                config2.conf, conf2_instances)
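    # the two instance groups now hold diverged versions of the cluster-wide config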

    # instances = conf1_instances + conf2_instances

    cmd = [
        cartridge_cmd,
        'repair',
        'list-topology',
        '--name',
        APPNAME,
        '--data-dir',
        data_dir,
        '--force',
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    assert_conf_not_changed(conf1_paths, config1.conf)
    assert_conf_not_changed(conf2_paths, config2.conf)

    lines = output.split('\n')
    logs = get_logs('\n'.join(lines[:3]))

    assert logs[0] == "Get current topology"
    assert "Clusterwide config is diverged between instances" in logs[1]
    assert logs[2] == "Process application cluster-wide configurations..."

    assert "Write application cluster-wide configurations..." not in output

    exp_summary_conf1 = '''   • instance-1, instance-2... OK
Instances
  * srv-1
    URI: localhost:3301
    replicaset: rpl-1
  * srv-2
    URI: srv-2-uri
    replicaset: rpl-1
  * srv-3
    URI: srv-3-uri
    replicaset: rpl-2
Replicasets
  * rpl-1
    roles:
     * vshard-storage
    instances:
     * srv-1
     * srv-2
  * rpl-2
    roles:
     * vshard-storage
    instances:
     * srv-3
'''

    exp_summary_conf2 = '''
   • instance-3, instance-4... OK
Instances
  * srv-1
    URI: localhost:3301
    replicaset: rpl-1
  * srv-2
    URI: srv-2-uri
    replicaset: rpl-1
  * srv-3
    URI: srv-3-uri
    replicaset: rpl-2
  * srv-4
    URI: srv-4-uri
    replicaset: rpl-2
Replicasets
  * rpl-1
    roles:
     * vshard-storage
    instances:
     * srv-1
     * srv-2
  * rpl-2
    roles:
     * vshard-storage
    instances:
     * srv-3
     * srv-4'''

    assert exp_summary_conf1 in output
    assert exp_summary_conf2 in output
def test_force_patch(cartridge_cmd, repair_cmd, tmpdir,
                     clusterwide_conf_simple_v1, clusterwide_conf_simple_v2):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    config1 = clusterwide_conf_simple_v1
    config2 = clusterwide_conf_simple_v2

    # create app configs
    conf1_instances = ['instance-1', 'instance-2']
    conf1_paths = write_instances_topology_conf(data_dir, APPNAME,
                                                config1.conf, conf1_instances)

    conf2_instances = ['instance-3', 'instance-4']
    conf2_paths = write_instances_topology_conf(data_dir, APPNAME,
                                                config2.conf, conf2_instances)

    instances = conf1_instances + conf2_instances

    args = simple_args.get(repair_cmd, [])
    cmd = [
        cartridge_cmd,
        'repair',
        repair_cmd,
        '--name',
        APPNAME,
        '--data-dir',
        data_dir,
        '--force',
    ]
    cmd.extend(args)

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    if repair_cmd == 'set-advertise-uri':
        first_log_line = "Set %s advertise URI to %s" % (args[0], args[1])
    elif repair_cmd == 'remove-instance':
        first_log_line = "Remove instance with UUID %s" % args[0]
    elif repair_cmd == 'set-leader':
        first_log_line = "Set %s leader to %s" % (args[0], args[1])

    logs = get_logs(output)
    assert logs[0] == first_log_line
    assert "Clusterwide config is diverged between instances" in logs[1]
    assert logs[2] == "Process application cluster-wide configurations..."

    process_conf_logs = logs[3:5]
    assert_ok_for_instances_group(process_conf_logs, conf1_instances)
    assert_ok_for_instances_group(process_conf_logs, conf2_instances)

    assert logs[5] == "Write application cluster-wide configurations..."

    write_conf_logs = logs[6:]
    assert_ok_for_all_instances(write_conf_logs, instances)

    # check config changes independently
    if repair_cmd == 'set-advertise-uri':
        new_conf1 = get_conf_with_new_uri(config1.conf, config1.instance_uuid,
                                          args[1])
        new_conf2 = get_conf_with_new_uri(config2.conf, config2.instance_uuid,
                                          args[1])
    elif repair_cmd == 'remove-instance':
        new_conf1 = get_conf_with_removed_instance(config1.conf, args[0])
        new_conf2 = get_conf_with_removed_instance(config2.conf, args[0])
    elif repair_cmd == 'set-leader':
        new_conf1 = get_conf_with_new_leader(config1.conf, args[0], args[1])
        new_conf2 = get_conf_with_new_leader(config2.conf, args[0], args[1])

    assert_conf_changed(conf1_paths, None, config1.conf, new_conf1)
    assert_conf_changed(conf2_paths, None, config2.conf, new_conf2)
Example #14
def logs() -> str:
    return json.dumps({'logs': get_logs()})
def test_list_topology(cartridge_cmd, tmpdir):
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    old_conf = copy.deepcopy(SIMPLE_CONF)

    # create app configs
    instances = ['instance-1', 'instance-2']
    conf_paths = write_instances_topology_conf(data_dir, APPNAME, old_conf,
                                               instances)

    # create other app configs
    other_instances = ['other-instance-1', 'other-instance-2']
    other_app_conf_paths = write_instances_topology_conf(
        data_dir, OTHER_APP_NAME, old_conf, other_instances)

    cmd = [
        cartridge_cmd,
        'repair',
        'list-topology',
        '--name',
        APPNAME,
        '--data-dir',
        data_dir,
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    assert_conf_not_changed(conf_paths, old_conf)
    assert_conf_not_changed(other_app_conf_paths, old_conf)

    lines = output.split('\n')
    logs = get_logs('\n'.join(lines[:3]))

    assert logs[0] == "Get current topology"
    assert logs[1] == "Process application cluster-wide configurations..."
    assert_ok_for_instances_group(logs, instances)

    summary = '\n'.join(lines[3:])

    exp_summary = '''Instances
  * srv-1-uuid
    URI: localhost:3301
    replicaset: rpl-1-uuid
  * srv-2-uuid
    URI: localhost:3302
    replicaset: rpl-1-uuid
  * srv-3-uuid
    URI: localhost:3303
    replicaset: rpl-1-uuid
  * srv-4-uuid
    URI: localhost:3304
    replicaset: rpl-2-uuid
  * srv-5-uuid
    URI: localhost:3305
    replicaset: rpl-1-uuid
  * srv-6-uuid disabled
    URI: localhost:3306
    replicaset: rpl-2-uuid
  * srv-expelled expelled
Replicasets
  * rpl-1-uuid
    roles:
     * vshard-storage
    instances:
     * srv-1-uuid
     * srv-2-uuid
     * srv-3-uuid
     * srv-5-uuid
  * rpl-2-uuid
    roles:
     * vshard-storage
    instances:
     * srv-4-uuid
     * srv-6-uuid

'''

    assert summary == exp_summary