def test_expel_leader(cartridge_cmd, project_with_vshard_replicasets):
    """A replicaset leader can be expelled once vshard is bootstrapped.

    Supported since cartridge 2.7.0:
    https://github.com/tarantool/cartridge/issues/1281
    """
    project = project_with_vshard_replicasets.project
    instances = project_with_vshard_replicasets.instances

    # bootstrap vshard first
    bootstrap_cmd = [
        cartridge_cmd, 'replicasets', 'bootstrap-vshard',
    ]
    rc, _ = run_command_and_get_output(bootstrap_cmd, cwd=project.path)
    assert rc == 0

    # expel the hot storage master (the replicaset leader)
    hot_master = instances['hot-master']
    expel_cmd = [
        cartridge_cmd, 'replicasets', 'expel',
        hot_master.name,
    ]
    rc, output = run_command_and_get_output(expel_cmd, cwd=project.path)
    assert rc == 0
def test_bootstrap(cartridge_cmd, project_with_vshard_replicasets):
    """Bootstrapping vshard succeeds once and fails on a second attempt."""
    project = project_with_vshard_replicasets.project
    instances = project_with_vshard_replicasets.instances

    bootstrap_cmd = [
        cartridge_cmd, 'replicasets', 'bootstrap-vshard',
    ]

    # first bootstrap succeeds
    rc, output = run_command_and_get_output(bootstrap_cmd, cwd=project.path)
    assert rc == 0
    assert get_log_lines(output) == [
        "• Vshard is bootstrapped successfully",
    ]

    router = instances['router']
    admin_api_url = router.get_admin_api_url()
    assert is_vshard_bootstrapped(admin_api_url)

    # bootstrapping again fails
    rc, output = run_command_and_get_output(bootstrap_cmd, cwd=project.path)
    assert rc == 1
    assert "already bootstrapped" in output
def test_instance_specified(cartridge_cmd, start_stop_cli, custom_admin_running_instances,
                            admin_flow, tmpdir):
    """`cartridge admin` picks the first alive instance unless --instance is given."""
    project = custom_admin_running_instances['project']
    run_dir = project.get_run_dir()

    INSTANCE1 = 'instance-1'
    INSTANCE2 = 'instance-2'

    # stop instance-2 so only instance-1 stays alive
    start_stop_cli.stop(project, [INSTANCE2])

    args = simple_args[admin_flow]

    base_cmd = [
        cartridge_cmd, 'admin',
        '--name', project.name,
        '--run-dir', run_dir,
    ]

    # no instance specified - instance-1 is chosen automatically
    rc, output = run_command_and_get_output(base_cmd + args, cwd=tmpdir)
    assert rc == 0

    # --instance=instance-2 (stopped) - connection fails
    rc, output = run_command_and_get_output(
        base_cmd + ['--instance', INSTANCE2] + args, cwd=tmpdir)
    assert rc == 1
    assert "Failed to dial" in output

    # --instance=instance-1 (running) - ok
    rc, output = run_command_and_get_output(
        base_cmd + ['--instance', INSTANCE1] + args, cwd=tmpdir)
    assert rc == 0
def test_bad_replicaset_name(cartridge_cmd, project_with_replicaset_no_roles):
    """add-roles/remove-roles fail for a replicaset missing from the topology."""
    project = project_with_replicaset_no_roles.project

    exp_error = "Replica set unknown-replicaset isn't found in current topology"

    # both role subcommands must report the same error
    for subcommand in ['add-roles', 'remove-roles']:
        cmd = [
            cartridge_cmd, 'replicasets', subcommand,
            '--replicaset', 'unknown-replicaset',
            'vshard-router',
        ]
        rc, output = run_command_and_get_output(cmd, cwd=project.path)
        assert rc == 1
        assert exp_error in output
def test_status_stateful_etcd2(cartridge_cmd, project_with_topology_and_vshard):
    """`failover status` reports the etcd2 state provider parameters."""
    project = project_with_topology_and_vshard

    set_cmd = [
        cartridge_cmd, "failover", "set", "stateful",
        "--state-provider", "etcd2",
        "--provider-params", '{"prefix": "test_prefix", "lock_delay": 15}',
    ]
    rc, output = run_command_and_get_output(set_cmd, cwd=project.path)
    assert rc == 0
    assert "Failover configured successfully" in output

    failover_info = get_etcd2_failover_info()

    status_cmd = [cartridge_cmd, "failover", "status"]
    rc, output = run_command_and_get_output(status_cmd, cwd=project.path)
    assert rc == 0

    assert_mode_and_params_state(failover_info, output)
    assert "stateboard_params" not in output
    assert "etcd2_params" in output

    etcd2_params = failover_info['etcd2_params']
    assert f"password: {etcd2_params['password']}" in output
    assert f"lock_delay: {etcd2_params['lock_delay']}" in output
    assert f"endpoints: {', '.join(etcd2_params['endpoints'])}" in output
    assert f"username: {etcd2_params['username']}" in output
    assert f"prefix: {etcd2_params['prefix']}" in output
def test_tempdir(cartridge_cmd, project_without_dependencies, tmpdir, pack_format):
    """Packing works for any CARTRIDGE_TEMPDIR; an outside dir is reported as used."""
    project = project_without_dependencies

    pack_cmd = [
        cartridge_cmd, "pack", pack_format,
        project.path,
    ]
    env = os.environ.copy()

    # application path itself as a cartridge_tempdir
    env['CARTRIDGE_TEMPDIR'] = project.path
    rc, output = run_command_and_get_output(pack_cmd, cwd=tmpdir, env=env)
    assert rc == 0

    # application subdirectory as a cartridge_tempdir
    env['CARTRIDGE_TEMPDIR'] = os.path.join(project.path, 'sub', 'sub', 'directory')
    rc, output = run_command_and_get_output(pack_cmd, cwd=tmpdir, env=env)
    assert rc == 0

    # correct directory (outside the project) as a cartridge_tempdir
    cartridge_tempdir = os.path.join(tmpdir, 'build')
    env['CARTRIDGE_TEMPDIR'] = cartridge_tempdir
    rc, output = run_command_and_get_output(pack_cmd, cwd=tmpdir, env=env)
    assert rc == 0
    assert re.search(
        r'Temporary directory is set to {}'.format(cartridge_tempdir), output) is not None
def test_repiar_no_workdirs(cartridge_cmd, clusterwide_conf_simple, repair_cmd, tmpdir):
    """repair fails when the data dir has no workdirs belonging to the target app."""
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    args = simple_args.get(repair_cmd, [])

    cmd = [
        cartridge_cmd, 'repair', repair_cmd,
        '--name', APPNAME,
        '--data-dir', data_dir,
    ]
    cmd.extend(args)

    exp_error = "No instance working directories found in %s" % data_dir

    # empty data dir
    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert exp_error in output

    # workdirs of a different application don't count
    instances = ['instance-1', 'instance-2']
    write_instances_topology_conf(data_dir, OTHER_APP_NAME, clusterwide_conf_simple.conf, instances)

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert exp_error in output
def test_status_stateful_stateboard(cartridge_cmd, project_with_topology_and_vshard):
    """`failover status` reports the stateboard state provider parameters."""
    project = project_with_topology_and_vshard

    set_cmd = [
        cartridge_cmd, "failover", "set", "stateful",
        "--state-provider", "stateboard",
        "--provider-params", '{"uri": "localhost:1020", "password": "pass"}',
    ]
    rc, output = run_command_and_get_output(set_cmd, cwd=project.path)
    assert rc == 0
    assert "Failover configured successfully" in output

    failover_info = get_stateboard_failover_info()

    status_cmd = [cartridge_cmd, "failover", "status"]
    rc, output = run_command_and_get_output(status_cmd, cwd=project.path)
    assert rc == 0

    assert_mode_and_params_state(failover_info, output)
    assert "etcd2_params" not in output
    assert "stateboard_params" in output

    tarantool_params = failover_info['tarantool_params']
    assert f"uri: {tarantool_params['uri']}" in output
    assert f"password: {tarantool_params['password']}" in output
def test_non_bootstrapped_instance(cartridge_cmd, clusterwide_conf_simple, repair_cmd, tmpdir):
    """repair skips instances with empty workdirs (not bootstrapped yet).

    Only the instance that actually has a cluster-wide config is processed,
    and the repair command still succeeds for it.
    """
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    config = clusterwide_conf_simple

    if repair_cmd == 'set-advertise-uri':
        args = [config.instance_uuid, config.instance_uri]
    elif repair_cmd == 'remove-instance':
        args = [config.instance_uuid]
    elif repair_cmd == 'set-leader':
        args = [config.replicaset_uuid, config.instance_uuid]
    else:
        # guard: without this, `args` would be unbound (NameError) for any
        # new parametrize value added to `repair_cmd`
        raise ValueError("Unexpected repair command: %s" % repair_cmd)

    cmd = [
        cartridge_cmd, 'repair', repair_cmd,
        '--name', APPNAME,
        '--data-dir', data_dir,
    ]
    cmd.extend(args)

    instances = ['instance-1', 'instance-2']

    # no cluster-wide configs at all:
    # create empty workdirs for both instances
    for instance in instances:
        work_dir = os.path.join(data_dir, '%s.%s' % (APPNAME, instance))
        os.makedirs(work_dir)

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert "No cluster-wide configs found in %s" % data_dir in output

    # write config for instance-1 only
    write_instances_topology_conf(data_dir, APPNAME, clusterwide_conf_simple.conf, instances[:1])

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0

    if repair_cmd == 'set-advertise-uri':
        first_log_line = "Set %s advertise URI to %s" % (args[0], args[1])
    elif repair_cmd == 'remove-instance':
        first_log_line = "Remove instance with UUID %s" % args[0]
    elif repair_cmd == 'set-leader':
        first_log_line = "Set %s leader to %s" % (args[0], args[1])

    # only instance-1 (the one with a config) appears in the logs
    logs = get_logs(output)
    assert len(logs) == 6
    assert logs[0] == first_log_line
    assert logs[1] == "Process application cluster-wide configurations..."
    assert logs[2] == "%s... OK" % instances[0]
    assert logs[3] == "Write application cluster-wide configurations..."
    assert logs[4] == "To reload cluster-wide configurations use --reload flag"
    assert logs[5] == "%s... OK" % instances[0]
def test_expel_fails(cartridge_cmd, project_with_vshard_replicasets):
    """A replicaset leader can't be expelled while vshard is bootstrapped."""
    project = project_with_vshard_replicasets.project
    instances = project_with_vshard_replicasets.instances

    # bootstrap vshard first
    bootstrap_cmd = [
        cartridge_cmd, 'replicasets', 'bootstrap-vshard',
    ]
    rc, _ = run_command_and_get_output(bootstrap_cmd, cwd=project.path)
    assert rc == 0

    # try to expel the hot storage master (the leader) - must fail
    hot_master = instances['hot-master']
    expel_cmd = [
        cartridge_cmd, 'replicasets', 'expel',
        hot_master.name,
    ]
    rc, output = run_command_and_get_output(expel_cmd, cwd=project.path)
    assert rc == 1
    assert "is the leader and can't be expelled" in output
def test_builddir(cartridge_cmd, project_without_dependencies, tmpdir):
    """CARTRIDGE_BUILDDIR must not point inside the project directory."""
    project = project_without_dependencies

    pack_cmd = [
        cartridge_cmd, "pack", "rpm",
        project.path,
    ]
    env = os.environ.copy()

    # the application path itself is rejected
    env['CARTRIDGE_BUILDDIR'] = project.path
    rc, output = run_command_and_get_output(pack_cmd, cwd=tmpdir, env=env)
    assert rc == 1
    assert "Build directory can't be project subdirectory" in output

    # an application subdirectory is rejected too
    env['CARTRIDGE_BUILDDIR'] = os.path.join(project.path, 'sub', 'sub', 'directory')
    rc, output = run_command_and_get_output(pack_cmd, cwd=tmpdir, env=env)
    assert rc == 1
    assert "Build directory can't be project subdirectory" in output

    # a directory outside the project works
    builddir = os.path.join(tmpdir, 'build')
    env['CARTRIDGE_BUILDDIR'] = builddir
    rc, output = run_command_and_get_output(pack_cmd, cwd=tmpdir, env=env)
    assert rc == 0
    assert re.search(r'[Bb]uild directory .*{}'.format(builddir), output) is not None
# NOTE(review): this function is truncated in the source — the triple-quoted
# expected-output literal (`"""• Current replica sets:`) is unterminated and
# its continuation is missing; code left untouched. Recover the full expected
# replica-set listing from version control before editing.
def test_default_application(cartridge_cmd, default_project_with_instances): project = default_project_with_instances.project # setup replicasets cmd = [ cartridge_cmd, 'replicasets', 'setup', '--bootstrap-vshard', ] rc, output = run_command_and_get_output(cmd, cwd=project.path) assert rc == 0 # list replicasets cmd = [ cartridge_cmd, 'replicasets', 'list', ] rc, output = run_command_and_get_output(cmd, cwd=project.path) assert rc == 0 assert output.strip() == """• Current replica sets:
def test_repiar_bad_data_dir(cartridge_cmd, repair_cmd, tmpdir):
    """repair rejects a --data-dir that doesn't exist or isn't a directory."""
    args = simple_args.get(repair_cmd, [])

    # non-existent path
    cmd = [
        cartridge_cmd, 'repair', repair_cmd,
        '--name', APPNAME,
        '--data-dir', 'non/existent/path',
    ]
    cmd.extend(args)

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert re.search(r"Data directory \S+ doesn't exist", output) is not None

    # a regular file instead of a directory
    filepath = os.path.join(tmpdir, 'data-dir-file')
    with open(filepath, 'w') as f:
        f.write("Hi")

    cmd = [
        cartridge_cmd, 'repair', repair_cmd,
        '--name', APPNAME,
        '--data-dir', filepath,
    ]
    cmd.extend(args)

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert "is not a directory" in output
def test_bad_run_dir(cartridge_cmd, custom_admin_running_instances, admin_flow, tmpdir):
    """admin rejects a --run-dir that is missing, not a directory, or empty."""
    project = custom_admin_running_instances['project']
    args = simple_args[admin_flow]

    def run_admin_with(run_dir):
        # helper: run `cartridge admin` against the given run dir
        cmd = [
            cartridge_cmd, 'admin',
            '--name', project.name,
            '--run-dir', run_dir,
        ]
        cmd.extend(args)
        return run_command_and_get_output(cmd, cwd=tmpdir)

    # non-existent path
    rc, output = run_admin_with('non/existent/path')
    assert rc == 1
    assert re.search(r"Run directory \S+ doesn't exist", output) is not None

    # a regular file instead of a directory
    filepath = os.path.join(tmpdir, 'run-dir-file')
    with open(filepath, 'w') as f:
        f.write("Hi")

    rc, output = run_admin_with(filepath)
    assert rc == 1
    assert "is not a directory" in output

    # an empty directory
    dirpath = os.path.join(tmpdir, 'empty-run-dir')
    os.makedirs(dirpath)

    rc, output = run_admin_with(dirpath)
    assert rc == 1
    assert re.search(r"Run directory \S+ is empty", output) is not None
# NOTE(review): this function is truncated in the source — the triple-quoted
# expected-output literal (`"""• Current replica sets:`) is unterminated and
# its continuation is missing; code left untouched. Recover the full expected
# replica-set listing from version control before editing.
def test_list_with_zones(cartridge_cmd, project_with_instances): project = project_with_instances.project instances = project_with_instances.instances if project.name == 'my-old-project': pytest.skip("Old cartridge doesn't support zones") router = instances['router'] s1_master = instances['s1-master'] s1_replica = instances['s1-replica'] s1_replica2 = instances['s1-replica-2'] # setup replicasets rpl_cfg_path = project.get_replicasets_cfg_path() rpl_cfg = { 'router': { 'roles': ['vshard-router', 'app.roles.custom', 'failover-coordinator'], 'instances': [router.name], }, 's-1': { 'roles': ['vshard-storage'], 'instances': [s1_master.name, s1_replica.name, s1_replica2.name], 'vshard_group': 'hot', }, } write_conf(rpl_cfg_path, rpl_cfg) cmd = [ cartridge_cmd, 'replicasets', 'setup', ] rc, output = run_command_and_get_output(cmd, cwd=project.path) assert rc == 0 # set storages zones admin_api_url = router.get_admin_api_url() set_instance_zone(admin_api_url, router.name, "Mordor") set_instance_zone(admin_api_url, s1_master.name, "Hogwarts") set_instance_zone(admin_api_url, s1_replica.name, "Narnia") # get current topology cmd = [ cartridge_cmd, 'replicasets', 'list', ] rc, output = run_command_and_get_output(cmd, cwd=project.path) assert rc == 0 assert output.strip() == """• Current replica sets:
def test_call_many_args(cartridge_cmd, custom_admin_running_instances, tmpdir):
    """`admin echo_user` handles string, number and boolean flags."""
    project = custom_admin_running_instances['project']
    run_dir = project.get_run_dir()

    base_cmd = [
        cartridge_cmd, 'admin',
        '--name', project.name,
        '--run-dir', run_dir,
        'echo_user',
    ]

    # all args are passed
    rc, output = run_command_and_get_output(
        base_cmd + ['--username', 'Elizabeth', '--age', '23', '--loves-cakes'],
        cwd=tmpdir)
    assert rc == 0
    assert get_log_lines(output) == [
        '• Hi, Elizabeth!',
        '• You are 23 years old',
        '• I know that you like cakes!',
    ]

    # --age is omitted:
    # the default number flag value (0) must not be passed through
    rc, output = run_command_and_get_output(
        base_cmd + ['--username', 'Elizabeth', '--loves-cakes'],
        cwd=tmpdir)
    assert rc == 0
    assert get_log_lines(output) == [
        "• Hi, Elizabeth!",
        "• I don't know your age",
        "• I know that you like cakes!",
    ]

    # boolean flag set explicitly to false
    rc, output = run_command_and_get_output(
        base_cmd + ['--username', 'Elizabeth', '--loves-cakes=false'],
        cwd=tmpdir)
    assert rc == 0
    assert get_log_lines(output) == [
        "• Hi, Elizabeth!",
        "• I don't know your age",
        "• How can you not love cakes?",
    ]
def test_result_image_fullname(cartridge_cmd, project_without_dependencies, tmpdir):
    """Docker image name is built from --version/--suffix, or taken verbatim from --tag."""
    project = project_without_dependencies

    def pack_and_check_tag(extra_args, expected_image_fullname):
        # helper: run `pack docker` and check the reported image tag
        cmd = [cartridge_cmd, "pack", 'docker'] + extra_args + [project.path]
        rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
        assert rc == 0
        assert 'Result image tagged as: {}'.format(expected_image_fullname) in output

    version = '0.1.0-42-gdeadbeaf'

    # only version
    pack_and_check_tag(
        ["--version", version],
        '{name}:{version}'.format(name=project.name, version=version),
    )

    # version and suffix
    suffix = 'dev'
    pack_and_check_tag(
        ["--version", version, "--suffix", suffix],
        '{name}:{version}-{suffix}'.format(name=project.name, version=version, suffix=suffix),
    )

    # explicit tag wins
    tag = 'my-cute-tag:xxx'
    pack_and_check_tag(["--tag", tag], tag)
def test_quiet_build(cartridge_cmd, project_without_dependencies):
    """--quiet hides hook and rocks output, except when the build fails."""
    project = project_without_dependencies

    prebuild_output = "pre-build hook output"
    rocks_make_output = "{} scm-1 is now installed".format(project.name)

    def write_prebuild_hook(script_lines):
        # helper: (re)write the project's cartridge.pre-build script
        with open(os.path.join(project.path, 'cartridge.pre-build'), 'w') as f:
            f.write('\n'.join(script_lines))

    write_prebuild_hook([
        "#!/bin/sh",
        "echo \"{}\"".format(prebuild_output),
    ])

    # w/o --quiet: hook and rocks output are shown
    rc, output = run_command_and_get_output([cartridge_cmd, "build"], cwd=project.path)
    assert rc == 0, 'Building project failed'
    assert prebuild_output in output
    assert rocks_make_output in output

    # with --quiet: both are hidden
    rc, output = run_command_and_get_output([cartridge_cmd, "build", "--quiet"], cwd=project.path)
    assert rc == 0, 'Building project failed'
    assert prebuild_output not in output
    assert rocks_make_output not in output

    # failing hook with --quiet: its output is shown anyway
    write_prebuild_hook([
        "#!/bin/sh",
        "echo \"{}\"".format(prebuild_output),
        "exit 1",
    ])

    rc, output = run_command_and_get_output([cartridge_cmd, "build", "--quiet"], cwd=project.path)
    assert rc == 1, 'Building project should fail'
    assert prebuild_output in output
# NOTE(review): this function is truncated in the source — the triple-quoted
# expected-output literal (`"""• Current replica sets:`) is unterminated and
# its continuation is missing; code left untouched. Recover the full expected
# replica-set listing from version control before editing.
def test_list(cartridge_cmd, project_with_instances): project = project_with_instances.project instances = project_with_instances.instances router = instances['router'] s1_master = instances['s1-master'] s1_replica = instances['s1-replica'] s1_replica2 = instances['s1-replica-2'] # setup replicasets rpl_cfg_path = project.get_replicasets_cfg_path() rpl_cfg = { 'router': { 'roles': ['vshard-router', 'app.roles.custom', 'failover-coordinator'], 'instances': [router.name], }, 's-1': { 'roles': ['vshard-storage'], 'instances': [s1_master.name, s1_replica.name, s1_replica2.name], 'weight': 1.234, 'vshard_group': 'hot', 'all_rw': True, }, } write_conf(rpl_cfg_path, rpl_cfg) cmd = [ cartridge_cmd, 'replicasets', 'setup', ] rc, output = run_command_and_get_output(cmd, cwd=project.path) assert rc == 0 # get current topology cmd = [ cartridge_cmd, 'replicasets', 'list', ] rc, output = run_command_and_get_output(cmd, cwd=project.path) assert rc == 0 assert output.strip() == """• Current replica sets:
def test_project_without_init(cartridge_cmd, project_without_dependencies, pack_format, tmpdir):
    """Packing without init.lua fails for every format except tgz."""
    project = project_without_dependencies

    ENTRYPOINT_NAME = 'init.lua'

    # remove the entrypoint from the project
    os.remove(os.path.join(project.path, ENTRYPOINT_NAME))

    cmd = [
        cartridge_cmd, "pack", pack_format,
        project.path,
    ]

    # rpm/deb packing on macOS needs docker
    if pack_format in ['rpm', 'deb'] and platform.system() == 'Darwin':
        cmd.append('--use-docker')

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    if pack_format == 'tgz':
        assert rc == 0
    else:
        assert rc == 1
        assert "Application doesn't contain entrypoint script" in output
def test_remove_uuid_does_not_exist(cartridge_cmd, clusterwide_conf_non_existent_instance, tmpdir):
    """remove-instance fails when the given UUID isn't present in the cluster."""
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    clusterwide_conf = clusterwide_conf_non_existent_instance
    instances = ['instance-1', 'instance-2']
    write_instances_topology_conf(data_dir, APPNAME, clusterwide_conf.conf, instances)

    cmd = [
        cartridge_cmd, 'repair', 'remove-instance',
        '--name', APPNAME,
        '--data-dir', data_dir,
        clusterwide_conf.instance_uuid,
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1

    # the same error is reported for every instance config
    exp_error = "Instance %s isn't found in cluster" % clusterwide_conf.instance_uuid
    assert_for_instances_group(
        get_logs(output), instances,
        lambda line: exp_error in line)
def test_set_weight(cartridge_cmd, project_with_vshard_replicasets):
    """set-weight updates a replicaset weight and logs the new value."""
    project = project_with_vshard_replicasets.project
    instances = project_with_vshard_replicasets.instances
    replicasets = project_with_vshard_replicasets.replicasets

    hot_storage_rpl = replicasets['hot-storage']
    hot_master = instances['hot-master']
    admin_api_url = hot_master.get_admin_api_url()

    NEW_WEIGHT = 123.45

    cmd = [
        cartridge_cmd, 'replicasets', 'set-weight',
        '--replicaset', hot_storage_rpl.name,
        str(NEW_WEIGHT),
    ]

    rc, output = run_command_and_get_output(cmd, cwd=project.path)
    assert rc == 0
    assert get_log_lines(output) == [
        '• Replica set %s weight is set to %s' % (hot_storage_rpl.name, NEW_WEIGHT),
    ]

    # verify the new weight via the admin API
    replicasets = get_replicasets(admin_api_url)
    hot_replicaset = get_replicaset_by_alias(replicasets, hot_storage_rpl.name)
    assert hot_replicaset is not None
    assert hot_replicaset['weight'] == NEW_WEIGHT
def test_base_runtime_dockerfile_with_env_vars(cartridge_cmd, project_without_dependencies, tmpdir):
    """`pack docker` must not fail on ${name} constructions in the base Dockerfile.

    This is not about the ENV option itself working — it checks that the base
    Dockerfile containing `${name}` is never passed through the `expand`
    function, which would otherwise raise an error or substitute something
    wrong.
    """
    dockerfile_with_env_path = os.path.join(tmpdir, 'Dockerfile')
    with open(dockerfile_with_env_path, 'w') as f:
        f.write('''
            FROM centos:8
            # comment this string to use cached image
            # ENV TEST_VARIABLE=${TEST_VARIABLE}
        ''')

    cmd = [
        cartridge_cmd, "pack", "docker",
        "--from", dockerfile_with_env_path,
        project_without_dependencies.path,
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 0
    assert 'Detected base Dockerfile {}'.format(dockerfile_with_env_path) in output
def test_bad_args(cartridge_cmd, conf_type, tmpdir,
                  clusterwide_conf_non_existent_instance,
                  clusterwide_conf_srv_expelled):
    """set-advertise-uri fails for a non-existent or expelled instance."""
    data_dir = os.path.join(tmpdir, 'tmp', 'data')
    os.makedirs(data_dir)

    configs = {
        'non-existent-srv': clusterwide_conf_non_existent_instance,
        'srv-expelled': clusterwide_conf_srv_expelled,
    }
    config = configs[conf_type]

    instances = ['instance-1', 'instance-2']
    write_instances_topology_conf(data_dir, APPNAME, config.conf, instances)

    cmd = [
        cartridge_cmd, 'repair', 'set-advertise-uri',
        '--name', APPNAME,
        '--data-dir', data_dir,
        config.instance_uuid, 'new-uri:666'
    ]

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1

    # the expected error depends on why the instance is invalid
    exp_errors = {
        'non-existent-srv': "Instance %s isn't found in cluster" % config.instance_uuid,
        'srv-expelled': "Instance %s is expelled" % config.instance_uuid,
    }
    exp_error = exp_errors[conf_type]
    assert_for_instances_group(get_logs(output), instances, lambda line: exp_error in line)
def test_invalid_version(cartridge_cmd, project_without_dependencies, tmpdir, pack_format):
    """pack rejects --version values that aren't semantic versions."""
    project = project_without_dependencies

    bad_versions = [
        'xx1', '1xx',
        'xx1.2', '1.2xx',
        'xx1.2.3', '1.2.3xx',
        'xx1.2.3-4', '1.2.3-4xxx',
        'xx1.2.3-4-gdeadbeaf',
    ]

    # hoisted out of the loop: the pattern is the same for every version
    rgx = r"Passed version .+ should be semantic \(major\.minor\.patch\[\-count\]\[\-commit\]\)"

    for bad_version in bad_versions:
        cmd = [
            cartridge_cmd, "pack", pack_format,
            "--version", bad_version,
            project.path,
        ]
        rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
        assert rc == 1
        assert re.search(rgx, output) is not None
def test_packing_without_git(cartridge_cmd, project_without_dependencies, tmpdir):
    """Without .git, rpm packing requires an explicit --version."""
    project = project_without_dependencies

    # remove the .git directory
    shutil.rmtree(os.path.join(project.path, '.git'))

    # rpm packing w/o --version fails
    cmd = [
        cartridge_cmd, "pack", "rpm",
        project.path,
    ]
    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert 'Failed to detect version' in output
    assert 'Please pass it explicitly' in output

    # with an explicit version it succeeds and produces the rpm
    cmd = [
        cartridge_cmd, "pack", "rpm",
        "--version", "0.1.0",
        project.path,
    ]
    process = subprocess.run(cmd, cwd=tmpdir)
    assert process.returncode == 0
    assert '{}-0.1.0-0.rpm'.format(project.name) in os.listdir(tmpdir)
def test_non_existent_func(cartridge_cmd, custom_admin_running_instances, admin_flow, tmpdir):
    """admin reports an error for an unknown admin function."""
    project = custom_admin_running_instances['project']
    run_dir = project.get_run_dir()

    # per-flow arguments for the unknown function
    simple_args = {
        'help-func': ['non_existent_func', '--help'],
        'call': ['non_existent_func'],
    }
    args = simple_args[admin_flow]

    cmd = [
        cartridge_cmd, 'admin',
        '--name', project.name,
        '--run-dir', run_dir,
    ]
    cmd.extend(args)

    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert 'Function "non_existent_func" isn\'t found' in output
def test_invalid_base_build_dockerfile(cartridge_cmd, project_without_dependencies, pack_format, tmpdir):
    """pack rejects base build Dockerfiles that aren't based on centos:8."""
    bad_dockerfiles = [
        "FROM ubuntu:xenial\n",
        "I am FROM centos:8",
    ]

    invalid_dockerfile_path = os.path.join(tmpdir, 'Dockerfile')
    for bad_dockerfile in bad_dockerfiles:
        # overwrite the same path with the next bad content
        with open(invalid_dockerfile_path, 'w') as f:
            f.write(bad_dockerfile)

        cmd = [
            cartridge_cmd, "pack", pack_format,
            "--use-docker",
            "--build-from", invalid_dockerfile_path,
            project_without_dependencies.path,
        ]

        rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
        assert rc == 1
        assert 'Invalid base build Dockerfile' in output
        assert 'base image must be centos:8' in output
def test_setup_etcd2_failover(cartridge_cmd, project_with_topology_and_vshard):
    """Stateful failover with the etcd2 provider is configured as requested."""
    project = project_with_topology_and_vshard

    cmd = [
        cartridge_cmd, "failover", "set", "stateful",
        "--state-provider", "etcd2",
        "--provider-params", '{"prefix": "test_prefix", "lock_delay": 15}',
        "--params", '{"fencing_enabled": true, "failover_timeout": 30, "fencing_timeout": 12}'
    ]

    rc, output = run_command_and_get_output(cmd, cwd=project.path)
    assert rc == 0
    assert "Failover configured successfully" in output

    # check the resulting cluster-wide failover configuration
    failover_info = get_etcd2_failover_info()
    assert failover_info == {
        'fencing_enabled': True,
        'failover_timeout': 30,
        'fencing_pause': 2,
        'fencing_timeout': 12,
        'mode': 'stateful',
        'state_provider': 'etcd2',
        'etcd2_params': {
            'endpoints': ['http://127.0.0.1:4001', 'http://127.0.0.1:2379'],
            'lock_delay': 15,
            'password': '',
            'prefix': 'test_prefix',
            'username': ''
        },
    }
def test_packing_without_git(cartridge_cmd, project_without_dependencies, tmpdir, pack_format):
    """Without a .git directory, packing requires an explicit --version."""
    project = project_without_dependencies

    # remove the .git directory
    shutil.rmtree(os.path.join(project.path, '.git'))

    # packing w/o --version fails
    cmd = [
        cartridge_cmd, "pack", pack_format,
        project.path,
    ]
    rc, output = run_command_and_get_output(cmd, cwd=tmpdir)
    assert rc == 1
    assert 'Project is not a git project' in output
    assert 'Please pass version explicitly' in output

    # with an explicit version it succeeds
    cmd = [
        cartridge_cmd, "pack", pack_format,
        "--version", "0.1.0",
        project.path,
    ]
    process = subprocess.run(cmd, cwd=tmpdir)
    assert process.returncode == 0
def get_breadcrumbs(url):
    """Return the list of breadcrumb values scraped from the product page at *url*.

    Returns an empty list when 'clp-breadcrumb' is missing from the page
    source — that means the request was redirected to the Flipkart home
    page, i.e. the product does not exist.

    NOTE(review): relies on module-level `output_file`, `temp_file`,
    `dont_return_breadcrumbs`, `write_to_output` and
    `run_command_and_get_output`, and shells out to awk/ag — confirm these
    are available where this function is used.
    """
    # local import: `urllib.urlopen` (used previously) was removed in Python 3
    import urllib.request

    # decode so the substring checks below operate on str, not bytes
    page_source = urllib.request.urlopen(url).read().decode('utf-8', errors='replace')

    # no breadcrumb block -> redirected to home page -> product doesn't exist
    if "clp-breadcrumb" not in page_source:
        return []

    # yes, the product exists
    write_to_output(page_source, output_file)

    # successively narrow the saved page down to the breadcrumb values;
    # raw strings: the backslash escapes are meant for awk/ag, not Python
    filters = [
        # keep only the clp-breadcrumb div
        r"awk '/clp-breadcrumb/,/\<\/div\>/' ",
        # keep only the <a ...>...</a> spans
        r"awk '/\<a/,/\<\/a\>/' ",
        # keep only the values between the <a>...</a> tags
        r"ag -o '\t[^\<^\>]*\n' ",
        # strip leading whitespace from the values
        r"ag -o '[^\t^\n].*' ",
    ]
    for command_prefix in filters:
        output = run_command_and_get_output(command_prefix + output_file, temp_file)
        write_to_output(output, output_file)

    return_list = []
    with open(output_file, "r") as f:
        for line in f.read().split("\n"):
            if line not in dont_return_breadcrumbs:
                return_list.append(line)

    print(return_list)  # kept: callers may rely on this diagnostic output
    return return_list