Example #1
def stop_cluster(argv):
    if len(argv) > 0:
        failure = False
        errors = ""
        for node in argv:
            (retval, err) = utils.stopCluster(node)
            if retval != 0:
                failure = True
                errors = errors + err+"\n"
        if failure:
            utils.err("unable to stop all nodes\n" + errors.rstrip())
        return

    print "Stopping Cluster..."
    output, retval = utils.run(["service", "pacemaker","stop"])
    if retval != 0:
        print output,
        utils.err("unable to stop pacemaker")
    if utils.is_rhel6():
        output, retval = utils.run(["service", "cman","stop"])
        if retval != 0:
            print output,
            utils.err("unable to stop cman")
    else:
        output, retval = utils.run(["service", "corosync","stop"])
        if retval != 0:
            print output,
            utils.err("unable to stop corosync")
Example #2
 def prepare(self):
     """remove temporary files, create the directory structure"""
     utils.rmtree(self.work_path)
     utils.make_dirs(os.path.join(self.work_iso, 'seedbank/etc/runonce.d'))
     utils.make_dirs(self.work_initrd)
     utils.run('bsdtar -C "%s" -xf "%s"' % (self.work_iso, self.iso_file))
     utils.run('chmod -R u+w "%s"' % self.work_iso)
Example #3
def setup_tablets():
  # Start up a master mysql and vttablet
  utils.debug("Setting up tablets")
  utils.run_vtctl('CreateKeyspace test_keyspace')
  master_tablet.init_tablet('master', 'test_keyspace', '0')
  utils.run_vtctl('RebuildShardGraph test_keyspace/0')
  utils.run_vtctl('RebuildKeyspaceGraph test_keyspace')
  utils.validate_topology()

  setup_schema()
  replica_tablet.create_db('vt_test_keyspace')
  master_tablet.start_vttablet(memcache=True)

  replica_tablet.init_tablet('idle', 'test_keyspace', start=True, memcache=True)
  snapshot_dir = os.path.join(utils.vtdataroot, 'snapshot')
  utils.run("mkdir -p " + snapshot_dir)
  utils.run("chmod +w " + snapshot_dir)
  utils.run_vtctl('Clone -force %s %s' %
                  (master_tablet.tablet_alias, replica_tablet.tablet_alias))

  utils.run_vtctl('Ping test_nj-0000062344')
  utils.run_vtctl('SetReadWrite ' + master_tablet.tablet_alias)
  utils.check_db_read_write(62344)

  utils.validate_topology()
  utils.run_vtctl('Ping test_nj-0000062345')
  utils.run_vtctl('ChangeSlaveType test_nj-0000062345 replica')
Example #4
 def hook_enable(self):
     """apply PXE variables on the configured enable hook(s) and run the
     hook(s)"""
     for hook in cfg['hooks_pxe']['enable']:
         hook = utils.apply_template(hook, self.pxe_variables)
         logging.info('found enable hook "%s"', hook)
         utils.run(hook, error=True)
Example #5
    def test_regular_operation(self):
        # Use a dedicated worker to run all vtworker commands.
        worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(["--cell", "test_nj"], auto_log=True)
        vtworker_endpoint = "localhost:" + str(worker_rpc_port)

        automation_server_proc, automation_server_port = utils.run_automation_server()

        source_shard_list = "0"
        dest_shard_list = "-80,80-"
        _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
        utils.run(
            environment.binary_argstr("automation_client")
            + " --server localhost:"
            + str(automation_server_port)
            + " --task HorizontalReshardingTask"
            + " --param keyspace="
            + self.KEYSPACE
            + " --param source_shard_list="
            + source_shard_list
            + " --param dest_shard_list="
            + dest_shard_list
            + " --param vtctld_endpoint="
            + vtctld_endpoint
            + " --param vtworker_endpoint="
            + vtworker_endpoint
            + " --param min_healthy_rdonly_tablets=1"
        )

        self.verify()

        utils.kill_sub_process(automation_server_proc, soft=True)
        utils.kill_sub_process(worker_proc, soft=True)
Example #6
  def test_sigterm(self):
    utils.run_vtctl('CreateKeyspace test_keyspace')

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')

    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

    # start a 'vtctl Sleep' command in the background
    args = [environment.binary_path('vtctl'),
            '-log_dir', environment.vtlogroot,
            '--alsologtostderr']
    args.extend(environment.topo_server_flags())
    args.extend(environment.tablet_manager_protocol_flags())
    args.extend(['Sleep', tablet_62344.tablet_alias, '60s'])
    sp = utils.run_bg(args, stdout=PIPE, stderr=PIPE)

    # wait for it to start, and let's kill it
    time.sleep(4.0)
    utils.run(['pkill', 'vtaction'])
    out, err = sp.communicate()

    # check the vtctl command got the right remote error back
    if "vtaction interrupted by signal" not in err:
      self.fail("cannot find expected output in error: " + err)
    logging.debug("vtaction was interrupted correctly:\n" + err)

    tablet_62344.kill_vttablet()
Example #7
def test_table_and_columns_query(executor):
    run(executor, "create table a(x text, y text)")
    run(executor, "create table b(z text)")

    assert set(executor.tables()) == set([('a',), ('b',)])
    assert set(executor.table_columns()) == set(
            [('a', 'x'), ('a', 'y'), ('b', 'z')])
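Example #7 (and the other pgcli/vcli-style tests in this collection, e.g. Examples #10, #13, and #53) goes through a test-suite run(executor, sql, ...) helper that executes SQL via the suite's executor object and returns printable output. A simplified stand-in is sketched below; it is built directly on a psycopg2 connection rather than the real executor, so the first parameter and the formatting are assumptions for illustration only:

# Sketch only: a stand-in for the SQL test helper, using a raw psycopg2 connection.
def run(conn, sql, join=False):
    with conn.cursor() as cur:
        cur.execute(sql)
        if cur.description is None:        # DDL/DML: no result set
            lines = [cur.statusmessage]
        else:
            lines = [" | ".join(map(str, row)) for row in cur.fetchall()]
    return "\n".join(lines) if join else lines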
Example #8
def run_test_sigterm():
  utils.zk_wipe()
  utils.run_vtctl('CreateKeyspace -force test_keyspace')

  # create the database so vttablets start, as it is serving
  tablet_62344.create_db('vt_test_keyspace')

  tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

  # start a 'vtctl Sleep' command in the background
  sp = utils.run_bg(utils.vtroot+'/bin/vtctl -logfile=/dev/null Sleep %s 60s' %
                    tablet_62344.tablet_alias,
                    stdout=PIPE, stderr=PIPE)

  # wait for it to start, and let's kill it
  time.sleep(2.0)
  utils.run(['pkill', 'vtaction'])
  out, err = sp.communicate()

  # check the vtctl command got the right remote error back
  if "vtaction interrupted by signal" not in err:
    raise utils.TestError("cannot find expected output in error:", err)
  utils.debug("vtaction was interrupted correctly:\n" + err)

  tablet_62344.kill_vttablet()
Example #9
  def test_get_srv_keyspace_names(self):
    utils.run_vtctl('CreateKeyspace test_keyspace1')
    utils.run_vtctl('CreateKeyspace test_keyspace2')
    t1 = tablet.Tablet(tablet_uid=1, cell="nj")
    t1.init_tablet("master", "test_keyspace1", "0")
    t1.update_addrs()
    t2 = tablet.Tablet(tablet_uid=2, cell="nj")
    t2.init_tablet("master", "test_keyspace2", "0")
    t2.update_addrs()
    utils.run_vtctl('RebuildKeyspaceGraph /zk/global/vt/keyspaces/*', auto_log=True)
    self.assertItemsEqual(self.topo.get_srv_keyspace_names('local'), ["test_keyspace1", "test_keyspace2"])

    # zkocc API test
    utils.prog_compile(['zkclient2'])
    out, err = utils.run(utils.vtroot+'/bin/zkclient2 -server localhost:%u -mode getSrvKeyspaceNames test_nj' % utils.zkocc_port_base, trap_output=True)
    self.assertEqual(err, "KeyspaceNames[0] = test_keyspace1\n" +
                          "KeyspaceNames[1] = test_keyspace2\n")

    # vtgate zk API test
    out, err = utils.run(utils.vtroot+'/bin/zkclient2 -server localhost:%u -mode getSrvKeyspaceNames test_nj' % self.vtgate_zk_port, trap_output=True)
    self.assertEqual(err, "KeyspaceNames[0] = test_keyspace1\n" +
                          "KeyspaceNames[1] = test_keyspace2\n")

    # vtgate zkocc API test
    out, err = utils.run(utils.vtroot+'/bin/zkclient2 -server localhost:%u -mode getSrvKeyspaceNames test_nj' % self.vtgate_zkocc_port, trap_output=True)
    self.assertEqual(err, "KeyspaceNames[0] = test_keyspace1\n" +
                          "KeyspaceNames[1] = test_keyspace2\n")
Example #10
def test_jsonb_renders_without_u_prefix(executor, expanded):
    run(executor, "create table jsonbtest(d jsonb)")
    run(executor, """insert into jsonbtest (d) values ('{"name": "Éowyn"}')""")
    result = run(executor, "SELECT d FROM jsonbtest LIMIT 1",
                 join=True, expanded=expanded)

    assert u'{"name": "Éowyn"}' in result
  def test_regular_operation(self):
    # Use a dedicated worker to run all vtworker commands.
    worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)
    vtworker_endpoint = "localhost:" + str(worker_rpc_port)

    automation_server_proc, automation_server_port = utils.run_automation_server()

    keyspace = 'test_keyspace'
    source_shard_list = '0'
    dest_shard_list = '-80,80-'
    _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
    utils.run(environment.binary_argstr('automation_client') +
              ' --server localhost:' + str(automation_server_port) +
              ' --task HorizontalReshardingTask' +
              ' --param keyspace=' + keyspace +
              ' --param source_shard_list=' + source_shard_list +
              ' --param dest_shard_list=' + dest_shard_list +
              ' --param source_shard_rdonly_list=' +
              worker.shard_rdonly1.tablet_alias +
              ' --param dest_shard_rdonly_list=' +
              worker.shard_0_rdonly1.tablet_alias + ',' +
              worker.shard_1_rdonly1.tablet_alias +
              ' --param vtctld_endpoint=' + vtctld_endpoint +
              ' --param vtworker_endpoint=' + vtworker_endpoint)

    self.assert_shard_data_equal(0, worker.shard_master,
                                 worker.shard_0_tablets.replica)
    self.assert_shard_data_equal(1, worker.shard_master,
                                 worker.shard_1_tablets.replica)

    utils.kill_sub_process(automation_server_proc, soft=True)
    utils.kill_sub_process(worker_proc, soft=True)
Example #12
def run_app(app):
    app_module = utils.get_app_module(app)
    require_password = config.get(app, 'REQUIRE_PASSWORD') == "True"
    if require_password:
        password_hash = get_password_hash(app_module)
        pass_param = " -p " + password_hash
    else:
        pass_param = ""

    # register job with broker
    cmd = BROKER_PATH + " -r -a " + app + pass_param
    data = utils.run(cmd = cmd, local_host = SERVER_IP, remote_host = BROKER_IP)
    job = json.loads(data[0])

    # run the new job
    cmd = prep_run_command(app_module, job)
    proc = Popen(cmd)
    child_pid = proc.pid

    # give broker the job's pid
    key_values = {"job_id": job["job_id"], "pid" : child_pid}
    key_values = json.dumps(key_values)
    cmd = BROKER_PATH + " -u '{}'".format(key_values)
    utils.run(cmd = cmd, local_host = SERVER_IP, remote_host = BROKER_IP)
    (output, error) = proc.communicate()

    if error:
        print("error:", error)
    print("output:", output)
Example #13
def test_unicode_support_in_output(executor, expanded):
    run(executor, "create table unicodechars(t text)")
    run(executor, "insert into unicodechars (t) values ('é')")

    # See issue #24, this raises an exception without proper handling
    assert u'é' in run(executor, "select * from unicodechars",
                       join=True, expanded=expanded)
Example #14
    def test_annotate_coverage_filename(self):
        "Does the '-c' flag work for figleaf-annotate?"

        TEST_FILENAME = '.figleaf_blah'
        
        # fail to list anything (no .figleaf_blah file)
        status, out, errout = utils.run('figleaf-annotate', 'list',
                                        '-c', TEST_FILENAME)
        assert status != 0

        # now run coverage...
        status, out, errout = utils.run('figleaf', 'tst-cover.py')
        print out, errout
        assert status == 0

        # rename coverage output file
        os.rename('.figleaf', TEST_FILENAME)

        # now list files that are covered in that recording...
        status, out, errout = utils.run('figleaf-annotate', 'list',
                                        '-c', TEST_FILENAME)
        assert status == 0
        assert out.find("\ntst-cover.py\n") != -1  # find() returns -1 when missing, so check explicitly

        # be sure to remove the file.
        os.unlink(TEST_FILENAME)
Example #15
def update_master():
    """
    Master branch is now checked out and needs updating.
    """
    para("Step 4 of 5: Commit versions and push changes to master.")
    utils.check_or_exit("Is your git repository now on master")

    # Update the master files.
    utils.update_files(MASTER_VERSION_REPLACE, release_data["versions"],
                       is_release=False)

    new_version = release_data["versions"]["version"]
    para("The master codebase has now been updated to reference the latest "
         "release.")
    para("Commit changes to master")
    run("git add --all")
    run('git commit -m "Update docs to version %s"' % new_version)

    actions()
    bullet("Self review the latest changes to master")
    bullet("Run: git diff origin/master", level=1)
    bullet("Push changes to master")
    bullet("Run: git push origin master", level=1)
    bullet("Verify builds are working")
    next("Once complete, re-run the script")
Example #16
def disable(address):
    """disable a pxelinux configuration file by renaming the file"""
    file_name = os.path.join(cfg['paths']['tftpboot'], 'pxelinux.cfg', address)
    utils.file_move(file_name, file_name + '.disabled')
    if cfg['hooks_pxe']['disable']:
        for cmd in cfg['hooks_pxe']['disable']:
            utils.run(cmd)
Example #17
def start_cluster(argv):
    if len(argv) > 0:
        start_cluster_nodes(argv)
        return

    print "Starting Cluster..."
    if utils.is_rhel6():
        # Verify that CMAN_QUORUM_TIMEOUT is set; if not, set it to 0
        retval, output = commands.getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]')
        if retval == 0:
            with open("/etc/sysconfig/cman", "a") as cman_conf_file:
                cman_conf_file.write("\nCMAN_QUORUM_TIMEOUT=0\n")

        output, retval = utils.run(["service", "cman","start"])
        if retval != 0:
            print output
            utils.err("unable to start cman")
    else:
        output, retval = utils.run(["service", "corosync","start"])
        if retval != 0:
            print output
            utils.err("unable to start corosync")
    output, retval = utils.run(["service", "pacemaker", "start"])
    if retval != 0:
        print output
        utils.err("unable to start pacemaker")
Example #18
def spawn_sip(sipfile, generated_src_dir):
    name, _ext = os.path.splitext(sipfile)

    # need to create generated_src_dir if it doesn't exist
    if not os.path.isdir(generated_src_dir):
        os.makedirs(generated_src_dir)

    # ask SIP to create a SBF file so we know which sources were generated
    sbf_filename = os.path.join(generated_src_dir, "%s.sbf" % name)

    import sipconfig

    cfg = sipconfig.Configuration()
    args = [
        cfg.sip_bin,
        "-c",
        generated_src_dir,  # generated sources go here
        "-b",
        sbf_filename,  # SBF output file describing the module's inputs
        sipfile,
    ]

    # spawn SIP, ensuring that a new SBF file is created
    mtime = os.path.getmtime(sbf_filename) if os.path.isfile(sbf_filename) else 0
    run(args)
    assert os.path.getmtime(sbf_filename) > mtime

    return parse_sbf(sbf_filename)
Example #19
def test_functions_query(executor):
    run(executor, '''create function func1() returns int
                     language sql as $$select 1$$''')
    run(executor, 'create schema schema1')
    run(executor, '''create function schema1.func2() returns int
                     language sql as $$select 2$$''')

    run(executor, '''create function func3()
                     returns table(x int, y int) language sql
                     as $$select 1, 2 from generate_series(1,5)$$;''')

    run(executor, '''create function func4(x int) returns setof int language sql
                     as $$select generate_series(1,5)$$;''')

    funcs = set(executor.functions())
    assert funcs >= set([
        FunctionMetadata('public', 'func1', '',
                         'integer', False, False, False),
        FunctionMetadata('public', 'func3', '',
                         'TABLE(x integer, y integer)', False, False, True),
        FunctionMetadata('public', 'func4', 'x integer',
                         'SETOF integer', False, False, True),
        FunctionMetadata('schema1', 'func2', '',
                         'integer', False, False, False),
      ])
Example #20
def full_status():
    if "--full" in utils.pcs_options:
        (output, retval) = utils.run(["crm_mon", "-1", "-r", "-R", "-A", "-f"])
    else:
        (output, retval) = utils.run(["crm_mon", "-1", "-r"])

    if (retval != 0):
        utils.err("cluster is not currently running on this node")

    if not utils.usefile or "--corosync_conf" in utils.pcs_options:
        cluster_name = utils.getClusterName()
        print "Cluster name: %s" % cluster_name

    if utils.stonithCheck():
        print("WARNING: no stonith devices and stonith-enabled is not false")

    if utils.corosyncPacemakerNodeCheck():
        print("WARNING: corosync and pacemaker node names do not match (IPs used in setup?)")

    print output

    if not utils.usefile:
        if not utils.is_rhel6():
            print "PCSD Status:"
            cluster.cluster_gui_status([],True)
            print ""
        utils.serviceStatus("  ")
Example #21
  def test_regular_operation(self):
    # Use a dedicated worker to run all vtworker commands.
    worker_proc, _, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj'],
        auto_log=True)
    vtworker_endpoint = 'localhost:' + str(worker_rpc_port)

    automation_server_proc, automation_server_port = (
        utils.run_automation_server())

    source_shard_list = '0'
    dest_shard_list = '-80,80-'
    _, vtctld_endpoint = utils.vtctld.rpc_endpoint()
    utils.run(
        environment.binary_argstr('automation_client') +
        ' --server localhost:' + str(automation_server_port) +
        ' --task HorizontalReshardingTask' +
        ' --param keyspace=' + self.KEYSPACE +
        ' --param source_shard_list=' + source_shard_list +
        ' --param dest_shard_list=' + dest_shard_list +
        ' --param vtctld_endpoint=' + vtctld_endpoint +
        ' --param vtworker_endpoint=' + vtworker_endpoint +
        ' --param min_healthy_rdonly_endpoints=1')

    self.verify()

    utils.kill_sub_process(automation_server_proc, soft=True)
    utils.kill_sub_process(worker_proc, soft=True)
Example #22
  def test_get_srv_keyspace_names(self):
    utils.run_vtctl('CreateKeyspace test_keyspace1')
    utils.run_vtctl('CreateKeyspace test_keyspace2')
    t1 = tablet.Tablet(tablet_uid=1, cell="nj")
    t1.init_tablet("master", "test_keyspace1", "0")
    t1.update_addrs()
    t2 = tablet.Tablet(tablet_uid=2, cell="nj")
    t2.init_tablet("master", "test_keyspace2", "0")
    t2.update_addrs()
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace*'], auto_log=True)

    # vtgate API test
    out, err = utils.run(environment.binary_path('zkclient2')+' -server localhost:%u -mode getSrvKeyspaceNames test_nj' % self.vtgate_zk_port, trap_output=True)
    self.assertEqual(err, "KeyspaceNames[0] = test_keyspace1\n" +
                          "KeyspaceNames[1] = test_keyspace2\n")

    if environment.topo_server_implementation == 'zookeeper':
      self.assertItemsEqual(self.topo.get_srv_keyspace_names('local'), ["test_keyspace1", "test_keyspace2"])

      # zkocc API test
      out, err = utils.run(environment.binary_path('zkclient2')+' -server localhost:%u -mode getSrvKeyspaceNames test_nj' % environment.zkocc_port_base, trap_output=True)
      self.assertEqual(err, "KeyspaceNames[0] = test_keyspace1\n" +
                       "KeyspaceNames[1] = test_keyspace2\n")

      # vtgate zkocc API test
      out, err = utils.run(environment.binary_path('zkclient2')+' -server localhost:%u -mode getSrvKeyspaceNames test_nj' % self.vtgate_zkocc_port, trap_output=True)
      self.assertEqual(err, "KeyspaceNames[0] = test_keyspace1\n" +
                       "KeyspaceNames[1] = test_keyspace2\n")
Example #23
def test_large_numbers_render_directly(executor, value):
    run(executor, "create table vcli_test.numbertest(a numeric)")
    run(executor,
        "insert into vcli_test.numbertest (a) values ({0})".format(value))

    assert value in run(executor, "select * from vcli_test.numbertest",
                        join=True)
Example #24
 def _xbuild(self, **kwargs):
     """
     Run xbuild on the solution
     """
     cmd = '%s %s' % (self.xbuild_path, self.solution_file)
     self.logger.info('This can take some time...')
     run(cmd)
Example #25
 def restore_customrules(self):
   customrules = os.path.join(environment.tmproot, 'customrules.json')
   self.create_customrules(customrules)
   if environment.topo_server().flavor() == 'zookeeper':
     utils.run(
         environment.binary_argstr('zk') + ' cp ' + customrules +
         ' /zk/test_ca/config/customrules/testrules')
Example #26
    def iso(self, args):
        """validate the input and if no errors are found build an (unattended)
        installation ISO from a regular install ISO"""
        if 'seed' not in args:
            args.seed = ''
        if 'config' not in args:
            args.config = ''
        if 'puppet' not in args:
            args.puppet = []

        args, config = self._shared(args, 'iso')
        if args.release in config['isos']:
            iso_file = os.path.join(config['paths']['isos'], args.release)
            iso_file += '.iso'
        else:
            raise self.exception('"%s" is not a valid release' % args.release)

        if not os.path.isfile(iso_file):
            raise self.exception('"%s" is a valid release, but the installer '
                'ISO is not available (run "seedbank manage -i %s" to download '
                'the ISO)' % (args.release, args.release))

        if 'isofile' in args and args.isofile:
            iso_dst = os.path.abspath(args.isofile)
        else:
            iso_dst = os.path.join(os.getcwd(), '%s.iso' % args.fqdn)

        if os.path.isfile(iso_dst):
            logging.warning('"%s" already exists, will overwrite', iso_dst)

        build = iso.Build(config, iso_file, args.fqdn, iso_dst)
        build.prepare()

        if args.puppet:
            build.add_puppet_manifests(args.fqdn)

        template_cfg = settings.template(args.fqdn, args.overlay, args.config,
            args.variables)

        if args.overlay:
            overlay = pimp.Overlay(self.cfg, args.overlay, args.fqdn)
            overlay.prepare(template_cfg['seed'])
            permissions = pimp.OverlayPermissions(self.cfg)
            permissions.script(overlay.dst, args.overlay, '/target')

        seed = pimp.SeedPimp(template_cfg, 'iso')
        preseed = seed.pimp(args.seeds, args.overlay, args.puppet)
        build.add_preseed(preseed)
        distribution = args.release.split('-')[0]
        build.add_templates(distribution)
        if args.overlay:
            build.add_overlay(overlay.dst)
        build.non_free_firmware(args.release)
        build.rebuild_initrd()
        build.create()
        logging.info('ISO "%s" has been created', iso_dst)
        for hook in self.cfg['hooks_iso']['enable']:
            #hook = utils.apply_template(hook, self.pxe_variables)
            logging.info('found enable hook "%s"', hook)
            utils.run(hook, error=True)
Example #27
 def test_utils_cleanup(self):
     test_file = '/tmp/' + utils.test_name()
     self.assertFalse(os.path.exists(test_file))
     with utils.cleanup(['rm', test_file]):
         utils.run(['touch', test_file])
         self.assertTrue(os.path.exists(test_file))
     self.assertFalse(os.path.exists(test_file))
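Example #27 depends on a utils.cleanup(...) context manager that runs the given command when the with-block exits, whether or not the body raised. A minimal compatible sketch, assuming that deferred command execution is all it does:

# Sketch only: a cleanup() context manager compatible with the test above.
import contextlib

@contextlib.contextmanager
def cleanup(argv):
    try:
        yield
    finally:
        run(argv)  # e.g. ['rm', test_file]; 'run' is any argv runner, like the sketch near Example #1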
Example #28
def test_copy_from_local_csv(executor):
    run(executor, """
        create table vcli_test.people (
            name varchar(50),
            age integer)
    """)

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write('Alice,20\nBob,30\nCindy,40\n')

    try:
        run(executor, """
            copy vcli_test.people from local '%s' delimiter ','
        """ % f.name)
    finally:
        os.remove(f.name)

    output = run(executor, "select * from vcli_test.people", join=True)
    assert output == dedent("""\
        +--------+-------+
        | name   |   age |
        |--------+-------|
        | Alice  |    20 |
        | Bob    |    30 |
        | Cindy  |    40 |
        +--------+-------+""")
Example #29
def action_test(arguments):
    graph = Graph.generate()
    statistic = graph.statistic()
    scores_reference = {n.name: round(v, 10) for n, v in
                        graph.markov_page_rank(arguments.steps,
                                               arguments.probability).items()}
    with tempfile.TemporaryDirectory() as temp_dir:
        filename = os.path.join(temp_dir, 'graph.dot')
        with open(filename, 'wb') as graph_file:
            graph_file.write(('\n'.join(graph.graphviz)).encode('utf-8'))
        process_stats, output_stats = utils.run(arguments.binary,
                                                ['-s', filename],
                                                os.getcwd())
        args = ['-r', str(arguments.steps), '-p', str(arguments.probability),
                filename]
        process_random, output_random = utils.run(arguments.binary,
                                                  args, os.getcwd())
        args = ['-m', str(arguments.steps), '-p', str(arguments.probability),
                filename]
        process_markov, output_markov = utils.run(arguments.binary,
                                                  args, os.getcwd())

    try:
        utils.expect_stats(process_stats, output_stats, graph.name,
                           statistic.num_nodes, statistic.num_edges,
                           statistic.min_in, statistic.max_in,
                           statistic.min_out, statistic.max_out)
        utils.expect_scores(process_random, output_random, scores_reference,
                            arguments.delta)
        utils.expect_scores(process_markov, output_markov, scores_reference)
    except utils.TestFailure as error:
        print(error)
Example #30
def get_default_properties():
    (output, retVal) = utils.run([settings.pengine_binary, "metadata"])
    if retVal != 0:
        utils.err("unable to get pengine metadata\n"+output)
    pe_root = ET.fromstring(output)

    (output, retVal) = utils.run([settings.crmd_binary, "metadata"])
    if retVal != 0:
        utils.err("unable to get crmd metadata\n"+output)
    crmd_root = ET.fromstring(output)
    
    (output, retVal) = utils.run([settings.cib_binary, "metadata"])
    if retVal != 0:
        utils.err("unable to get cib metadata\n"+output)
    cib_root = ET.fromstring(output)

    parameters = {}
    for root in [pe_root, crmd_root, cib_root]:
        for param in root.getiterator('parameter'):
            name = param.attrib["name"]
            content = param.find("content")
            if content is not None:
                default = content.attrib["default"]
            else:
                default = ""

            parameters[name] = default
    return parameters
Example #31
def test_twoadapters():
    """two adapters"""
    run("-a AATTTCAGGAATT -a GTTCTCTAGTTCT", "twoadapters.fasta",
        "twoadapters.fasta")
Example #32
def test_read_wildcard():
    """test wildcards in reads"""
    run("--match-read-wildcards -b ACGTACGT", "wildcard.fa", "wildcard.fa")
Example #33
def test_suffix():
    """-y/--suffix parameter, combined with _F3"""
    run("-c -e 0.12 -a 1=330201030313112312 -y _my_suffix_{name} --strip-f3",
        "suffix.fastq", "solid.csfasta", 'solid.qual')
Example #34
def test_gz_multiblock():
    """compressed gz file with multiple blocks (created by concatenating two .gz files)"""
    run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.gz")
Example #35
def test_mask_adapter():
    """mask adapter with N (reads maintain the same length)"""
    run("-b CAAG -n 3 --mask-adapter", "anywhere_repeat.fastq",
        "anywhere_repeat.fastq")
Example #36
def test_polya_brace_notation():
    """poly-A tails"""
    run("-m 24 -O 10 -a A{35}", "polya.fasta", "polya.fasta")
Example #37
def test_polya():
    """poly-A tails"""
    run("-m 24 -O 10 -a AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "polya.fasta",
        "polya.fasta")
Example #38
def test_qualbase():
    """-q with low qualities, using ascii(quality+64) encoding"""
    run("-q 10 --quality-base 64 -a XXXXXX", "illumina64.fastq",
        "illumina64.fastq")
Example #39
def test_overlap_b():
    """-O/--overlap with -b"""
    run("-O 10 -b TTAGACATATCTCCGTCG -N", "overlapb.fa", "overlapb.fa")
Example #40
def test_quality_trim_only():
    """only trim qualities, do not remove adapters"""
    run("-q 10 --quality-base 64", "illumina64.fastq", "illumina64.fastq")
Example #41
def test_length_tag():
    """454 data; -n and --length-tag"""
    run(
        "-n 3 -e 0.1 --length-tag length= "
        "-b TGAGACACGCAACAGGGGAAAGGCAAGGCACACAGGGGATAGG "
        "-b TCCATCTCATCCCTGCGTGTCCCATCTGTTCCCTCCCTGTCTCA", '454.fa', '454.fa')
Example #42
def test_qualtrim():
    """-q with low qualities"""
    run("-q 10 -a XXXXXX", "lowqual.fastq", "lowqual.fastq")
Example #43
def test_maximum_length():
    """-M/--maximum-length"""
    run("-c -M 5 -a 330201030313112312", "maxlen.fa", "lengths.fa")
Example #44
def test_overlap_a():
    """-O/--overlap with -a (-c omitted on purpose)"""
    run("-O 10 -a 330201030313112312 -e 0.0 -N", "overlapa.fa", "overlapa.fa")
Example #45
  def start_vttablet(self, port=None, auth=False, memcache=False,
                     wait_for_state="SERVING", customrules=None,
                     schema_override=None, cert=None, key=None, ca_cert=None,
                     repl_extra_flags={}, table_acl_config=None,
                     target_tablet_type=None, lameduck_period=None,
                     extra_args=None, full_mycnf_args=False,
                     security_policy=None):
    """
    Starts a vttablet process, and returns it.
    The process is also saved in self.proc, so it's easy to kill as well.
    """
    environment.prog_compile('vtaction')
    args = environment.binary_args('vttablet') + [
            '-port', '%s' % (port or self.port),
            '-tablet-path', self.tablet_alias,
            '-log_dir', environment.vtlogroot]
    args.extend(environment.topo_server_flags())
    args.extend(utils.binlog_player_protocol_flags)

    dbconfigs = self._get_db_configs_file(repl_extra_flags)
    for key1 in dbconfigs:
      for key2 in dbconfigs[key1]:
        args.extend(["-db-config-"+key1+"-"+key2, dbconfigs[key1][key2]])

    if full_mycnf_args:
      # this flag is used to specify all the mycnf_ flags, to make
      # sure that code works and can fork actions.
      relay_log_path = os.path.join(self.tablet_dir, "relay-logs",
                                    "vt-%010d-relay-bin" % self.tablet_uid)
      args.extend([
          "-mycnf_server_id", str(self.tablet_uid),
          "-mycnf_mysql_port", str(self.mysql_port),
          "-mycnf_data_dir", os.path.join(self.tablet_dir, "data"),
          "-mycnf_innodb_data_home_dir", os.path.join(self.tablet_dir,
                                                      "innodb", "data"),
          "-mycnf_innodb_log_group_home_dir", os.path.join(self.tablet_dir,
                                                           "innodb", "logs"),
          "-mycnf_socket_file", os.path.join(self.tablet_dir, "mysql.sock"),
          "-mycnf_error_log_path", os.path.join(self.tablet_dir, "error.log"),
          "-mycnf_slow_log_path", os.path.join(self.tablet_dir,
                                               "slow-query.log"),
          "-mycnf_relay_log_path", relay_log_path,
          "-mycnf_relay_log_index_path", relay_log_path + ".index",
          "-mycnf_relay_log_info_path", os.path.join(self.tablet_dir,
                                                     "relay-logs",
                                                     "relay-log.info"),
          "-mycnf_bin_log_path", os.path.join(self.tablet_dir, "bin-logs",
                                              "vt-%010d-bin" % self.tablet_uid),
          "-mycnf_master_info_file", os.path.join(self.tablet_dir,
                                                  "master.info"),
          "-mycnf_pid_file", os.path.join(self.tablet_dir, "mysql.pid"),
          "-mycnf_tmp_dir", os.path.join(self.tablet_dir, "tmp"),
          "-mycnf_slave_load_tmp_dir", os.path.join(self.tablet_dir, "tmp"),
          ])

    if memcache:
      args.extend(["-rowcache-bin", environment.memcached_bin()])
      memcache_socket = os.path.join(self.tablet_dir, "memcache.sock")
      args.extend(["-rowcache-socket", memcache_socket])
      args.extend(["-enable-rowcache"])

    if auth:
      args.extend(['-auth-credentials', os.path.join(environment.vttop, 'test', 'test_data', 'authcredentials_test.json')])

    if customrules:
      args.extend(['-customrules', customrules])

    if schema_override:
      args.extend(['-schema-override', schema_override])

    if table_acl_config:
        args.extend(['-table-acl-config', table_acl_config])
        args.extend(['-queryserver-config-strict-table-acl'])

    if cert:
      self.secure_port = environment.reserve_ports(1)
      args.extend(['-secure-port', '%s' % self.secure_port,
                   '-cert', cert,
                   '-key', key])
      if ca_cert:
        args.extend(['-ca_cert', ca_cert])
    if target_tablet_type:
      args.extend(['-target_tablet_type', target_tablet_type,
                   '-health_check_interval', '2s',
                   '-allowed_replication_lag', '30'])
    if lameduck_period:
      args.extend(['-lameduck-period', lameduck_period])
    if extra_args:
      args.extend(extra_args)
    if security_policy:
      args.extend(['-security_policy', security_policy])

    stderr_fd = open(os.path.join(self.tablet_dir, "vttablet.stderr"), "w")
    # increment count only the first time
    if not self.proc:
      Tablet.tablets_running += 1
    self.proc = utils.run_bg(args, stderr=stderr_fd)
    stderr_fd.close()

    # wait for zookeeper PID just to be sure we have it
    if environment.topo_server_implementation == 'zookeeper':
      utils.run(environment.binary_argstr('zk')+' wait -e ' + self.zk_pid, stdout=utils.devnull)

    # wait for query service to be in the right state
    if wait_for_state:
      self.wait_for_vttablet_state(wait_for_state, port=port)

    return self.proc
Example #46
def test_too_long():
    """--too-long-output"""
    run("-c -M 5 --too-long-output toolong.tmp.fa -a 330201030313112312",
        "maxlen.fa", "lengths.fa")
    assert_files_equal(datapath('toolong.fa'), "toolong.tmp.fa")
    os.remove('toolong.tmp.fa')
Example #47
 def _destroy_lvm(self):
     utils.run("vgremove --yes libbd_fs_tests >/dev/null 2>&1")
     utils.run("pvremove --yes %s >/dev/null 2>&1" % self.loop_dev)
Example #48
        InlineNamespaceFix3(),
        ExtDocLinksFix(),
        EnableIfFix(),
        ExternalLinksFix(),
        HeaderOverridesFix()
    ]
    files = [
        path.split(f)
        for f in utils.get_all_files(html_dir, any=('*.html', '*.htm'))
    ]
    if files:
        with futures.ThreadPoolExecutor(
                max_workers=min(len(files), num_threads)) as executor:
            jobs = {
                executor.submit(postprocess_file, dir, file, fixes): file
                for dir, file in files
            }
            for job in futures.as_completed(jobs):
                if _threadError:
                    executor.shutdown(False)
                    break
                else:
                    file = jobs[job]
                    print('Finished processing {}.'.format(file))
        if _threadError:
            return 1


if __name__ == '__main__':
    utils.run(main)
Example #49

def get_input(content):
    return content.split('\n')


def get_for_slope(input, y_slope, x_slope):
    count = 0
    y_index = y_slope
    for line in input[x_slope::x_slope]:
        if not line:
            continue

        count += 1 if line[y_index % len(line)] == '#' else 0
        y_index += y_slope
    return count


def get_answer(input):
    return reduce(operator.mul, (
        get_for_slope(input, 1, 1),
        get_for_slope(input, 3, 1),
        get_for_slope(input, 5, 1),
        get_for_slope(input, 7, 1),
        get_for_slope(input, 1, 2),
    ), 1)


if __name__ == '__main__':
    run(__file__, get_input, get_answer)
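A small worked check of get_for_slope, using a made-up grid, shows how the column index wraps around the row width via y_index % len(line):

# Illustration only: a tiny made-up grid.
grid = get_input("#.#.\n.#.#\n#.#.\n.#.#")
# Slope "right 3, down 1": the columns checked are 3, then 6 % 4 = 2, then 9 % 4 = 1,
# and each of those positions holds '#', so three trees are counted.
assert get_for_slope(grid, 3, 1) == 3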
Example #50
    def test_xfs_generic_resize(self):
        """Test generic resize function with an xfs file system"""

        utils.run("pvcreate -ff -y %s >/dev/null 2>&1" % self.loop_dev)
        utils.run("vgcreate -s10M libbd_fs_tests %s >/dev/null 2>&1" %
                  self.loop_dev)
        utils.run("lvcreate -n xfs_test -L50M libbd_fs_tests >/dev/null 2>&1")
        self.addCleanup(self._destroy_lvm)

        lv = "/dev/libbd_fs_tests/xfs_test"

        # clean the device
        succ = BlockDev.fs_clean(lv)

        succ = BlockDev.fs_xfs_mkfs(lv, None)
        self.assertTrue(succ)

        with mounted(lv, self.mount_dir):
            fi = BlockDev.fs_xfs_get_info(lv)
        self.assertTrue(fi)
        self.assertEqual(fi.block_size * fi.block_count, 50 * 1024**2)

        # no change, nothing should happen
        with mounted(lv, self.mount_dir):
            succ = BlockDev.fs_resize(lv, 0)
        self.assertTrue(succ)

        with mounted(lv, self.mount_dir):
            fi = BlockDev.fs_xfs_get_info(lv)
        self.assertTrue(fi)
        self.assertEqual(fi.block_size * fi.block_count, 50 * 1024**2)

        # (still) impossible to shrink an XFS file system
        with mounted(lv, self.mount_dir):
            with self.assertRaises(GLib.GError):
                succ = BlockDev.fs_resize(lv, 40 * 1024**2)

        utils.run("lvresize -L70M libbd_fs_tests/xfs_test >/dev/null 2>&1")
        # should grow
        with mounted(lv, self.mount_dir):
            succ = BlockDev.fs_resize(lv, 0)
        self.assertTrue(succ)
        with mounted(lv, self.mount_dir):
            fi = BlockDev.fs_xfs_get_info(lv)
        self.assertTrue(fi)
        self.assertEqual(fi.block_size * fi.block_count, 70 * 1024**2)

        utils.run("lvresize -L90M libbd_fs_tests/xfs_test >/dev/null 2>&1")
        # should grow just to 80 MiB
        with mounted(lv, self.mount_dir):
            succ = BlockDev.fs_resize(lv, 80 * 1024**2)
        self.assertTrue(succ)
        with mounted(lv, self.mount_dir):
            fi = BlockDev.fs_xfs_get_info(lv)
        self.assertTrue(fi)
        self.assertEqual(fi.block_size * fi.block_count, 80 * 1024**2)

        # should grow to 90 MiB
        with mounted(lv, self.mount_dir):
            succ = BlockDev.fs_resize(lv, 0)
        self.assertTrue(succ)
        with mounted(lv, self.mount_dir):
            fi = BlockDev.fs_xfs_get_info(lv)
        self.assertTrue(fi)
        self.assertEqual(fi.block_size * fi.block_count, 90 * 1024**2)
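Example #50 leans on a mounted() context manager. A plausible minimal sketch is given below, assuming it only mounts the device on entry and unmounts it on exit; the suite's real helper may add mount options or error handling:

# Sketch only: a mounted() helper compatible with the test above (requires root).
import contextlib
import subprocess

@contextlib.contextmanager
def mounted(device, mountpoint):
    subprocess.check_call(["mount", device, mountpoint])
    try:
        yield
    finally:
        subprocess.check_call(["umount", mountpoint])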
Example #51
def test_invalid_syntax(executor):
    with pytest.raises(psycopg2.ProgrammingError) as excinfo:
        run(executor, 'invalid syntax!')
    assert 'syntax error at or near "invalid"' in str(excinfo.value)
Example #52
    def test_generic_wipe(self):
        """Verify that generic signature wipe works as expected"""

        with self.assertRaises(GLib.GError):
            BlockDev.fs_wipe("/non/existing/device", True)

        ret = utils.run("pvcreate -ff -y %s >/dev/null 2>&1" % self.loop_dev)
        self.assertEqual(ret, 0)

        succ = BlockDev.fs_wipe(self.loop_dev, True)
        self.assertTrue(succ)

        # now test the same multiple times in a row
        for i in range(10):
            ret = utils.run("pvcreate -ff -y %s >/dev/null 2>&1" %
                            self.loop_dev)
            self.assertEqual(ret, 0)

            succ = BlockDev.fs_wipe(self.loop_dev, True)
            self.assertTrue(succ)

        # vfat has multiple signatures on the device so it allows us to test the
        # 'all' argument of fs_wipe()
        ret = utils.run("mkfs.vfat -I %s >/dev/null 2>&1" % self.loop_dev)
        self.assertEqual(ret, 0)

        time.sleep(0.5)
        succ = BlockDev.fs_wipe(self.loop_dev, False)
        self.assertTrue(succ)

        # the second signature should still be there
        # XXX: lsblk uses the udev db so we need to make sure it is up to date
        utils.run("udevadm settle")
        fs_type = check_output(
            ["blkid", "-ovalue", "-sTYPE", "-p", self.loop_dev]).strip()
        self.assertEqual(fs_type, b"vfat")

        # get rid of all the remaining signatures (there could be vfat + PMBR for some reason)
        succ = BlockDev.fs_wipe(self.loop_dev, True)
        self.assertTrue(succ)

        utils.run("udevadm settle")
        fs_type = check_output(
            ["blkid", "-ovalue", "-sTYPE", "-p", self.loop_dev]).strip()
        self.assertEqual(fs_type, b"")

        # now do the wipe all in a one step
        ret = utils.run("mkfs.vfat -I %s >/dev/null 2>&1" % self.loop_dev)
        self.assertEqual(ret, 0)

        succ = BlockDev.fs_wipe(self.loop_dev, True)
        self.assertTrue(succ)

        utils.run("udevadm settle")
        fs_type = check_output(
            ["blkid", "-ovalue", "-sTYPE", "-p", self.loop_dev]).strip()
        self.assertEqual(fs_type, b"")

        # try to wipe empty device
        with six.assertRaisesRegex(self, GLib.GError,
                                   "No signature detected on the device"):
            BlockDev.fs_wipe(self.loop_dev, True)
Example #53
def test_schemata_table_views_and_columns_query(executor):
    run(executor, "create table a(x text, y text)")
    run(executor, "create table b(z text)")
    run(executor, "create view d as select 1 as e")
    run(executor, "create schema schema1")
    run(executor, "create table schema1.c (w text)")
    run(executor, "create schema schema2")

    # schemata
    # don't enforce all members of the schemas since they may include postgres
    # temporary schemas
    assert set(executor.schemata()) >= set(
        ['public', 'pg_catalog', 'information_schema', 'schema1', 'schema2'])
    assert executor.search_path() == ['pg_catalog', 'public']

    # tables
    assert set(executor.tables()) >= set([('public', 'a'), ('public', 'b'),
                                          ('schema1', 'c')])

    assert set(executor.table_columns()) >= set([('public', 'a', 'x'),
                                                 ('public', 'a', 'y'),
                                                 ('public', 'b', 'z'),
                                                 ('schema1', 'c', 'w')])

    # views
    assert set(executor.views()) >= set([('public', 'd')])

    assert set(executor.view_columns()) >= set([('public', 'd', 'e')])
Example #54
def store_value_and_calculate_rate(logger, value, path):
    logger.debug(
        'Attempting to store value ({value}) and calculate rate with data '
        'from {path}'.format(
            value=value,
            path=path,
        )
    )
    current_time = int(time.time())
    logger.debug('Current time: {current_time}'.format(
        current_time=current_time,
    ))

    old_results = None
    logger.debug('Attempting to load old results')
    try:
        with open(path) as data_handle:
            old_results = json.load(data_handle)
    except IOError:
        logger.debug('Previous results not found.')
        print("Could not open previous data to calculate rate.")
        sys.exit(STATUS_UNKNOWN)
    except ValueError:
        logger.warn('Previous data in {path} was in unknown format'.format(
            path=path,
        ))
        print(
            "Previous data was in an unknown format, cannot calculate rate."
        )
        sys.exit(STATUS_UNKNOWN)
    finally:
        logger.debug('Storing data')
        run(['mkdir', '-p', os.path.dirname(path)])
        with open(path, 'w') as save_handle:
            json.dump(
                obj={
                    'timestamp': current_time,
                    'result': value,
                },
                fp=save_handle,
            )
        logger.debug('Old data stored')

    logger.debug('Old results were: {old_results}'.format(
        old_results=old_results,
    ))

    if 'timestamp' in old_results and 'result' in old_results:
        logger.debug('Calculating rate')
        old_time = old_results['timestamp']
        interval = current_time - old_time
        logger.debug('Interval was: {interval}'.format(interval=interval))
        if interval < 1:
            logger.warn(
                'Previous data collection was too recent, cannot calculate '
                'rate'
            )
            print(
                "Previous data was collected too recently, cannot calculate "
                "rate."
            )
            sys.exit(STATUS_UNKNOWN)
        else:
            difference = value - old_results['result']
            logger.debug('Difference was: {diff}'.format(diff=difference))
            return float(difference)/interval
    else:
        logger.warn(
            'Old data in {path} was incomplete, cannot calculate rate'.format(
                path=path,
            )
        )
        print("Previous data was incomplete, cannot calculate rate.")
        sys.exit(STATUS_UNKNOWN)
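For a concrete sense of the arithmetic: if the previously stored file held {"timestamp": 1000, "result": 50} and the function is called with value=110 at current_time=1060, it returns (110 - 50) / (1060 - 1000) = 1.0, i.e. one unit per second; an interval under one second, missing keys, or unreadable previous data instead exit with STATUS_UNKNOWN.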
Example #55
def test_unicode_support_in_unknown_type(executor):
    assert u'日本語' in run(executor, "SELECT '日本語' AS japanese;", join=True)
Example #56
def test_datatypes_query(executor):
    run(executor, 'create type foo AS (a int, b text)')

    types = list(executor.datatypes())
    assert types == [('public', 'foo')]
Example #57
def test_multiple_queries_same_line_syntaxerror(executor):
    with pytest.raises(psycopg2.ProgrammingError) as excinfo:
        run(executor, "select 'foo'; invalid syntax")
    assert 'syntax error at or near "invalid"' in str(excinfo.value)
Example #58
def test_large_numbers_render_directly(executor, value):
    run(executor, "create table numbertest(a numeric)")
    run(executor, "insert into numbertest (a) values ({0})".format(value))

    assert value in run(executor, "select * from numbertest", join=True)
Example #59
def test_multiple_queries_with_special_command_same_line(executor, pgspecial):
    result = run(executor, "select 'foo'; \d", pgspecial=pgspecial)
    assert len(result) == 4  # 2 * (output+status)
    assert "foo" in result[0]
    # This is a lame check. :(
    assert "Schema" in result[2]
Example #60
def test_special_command_help(executor, pgspecial):
    result = run(executor, '\\?', pgspecial=pgspecial)[0].split('|')
    assert (result[1].find(u'Command') != -1)
    assert (result[2].find(u'Description') != -1)