Example #1
def _main_parent():
    _bind_signals_parent()
    print("Child pid: %s" % _child)
    status = 0
    pid = _child  # so the final print cannot hit a NameError if os.wait() never succeeds
    child_terminated = False
    max_num_exc = 10
    while not child_terminated and max_num_exc > 0:
        try:
            (pid, status) = os.wait()
            child_terminated = True
        except OSError as ose:
            # errno.ECHILD - No child processes
            child_terminated = ose.errno == errno.ECHILD
            if not child_terminated:
                print(
                    "OSError exception while waiting for child process %s, errno: %s - %s"
                    % (_child, errno.errorcode[ose.errno], str(ose)))
        except BaseException as exc:
            print(
                "Unexpected exception while waiting for child process %s, %s: %s"
                % (_child, str(type(exc)), str(exc)))
            max_num_exc -= 1
    print("Child pid: %s - Exit status: %s" % (pid, status))
    # status is a 16-bit number, whose low byte is the signal number that killed the process, and whose high byte is the exit status
    exit(status >> 8)
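The comment above describes the raw encoding of the wait status; the os module ships helpers that decode it portably. A minimal sketch (assuming a child process exists to wait for):

import os

pid, status = os.wait()
if os.WIFEXITED(status):
    print("exited with status", os.WEXITSTATUS(status))  # the high byte
elif os.WIFSIGNALED(status):
    print("killed by signal", os.WTERMSIG(status))       # the low byte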
Example #2
import os
import configparser

def get_config(path):
    if not os.path.exists(path):
        # create_config(path)
        print("Config file '{}' does not exist.".format(path))
        exit(-3)

    config = configparser.ConfigParser()
    config.read(path)
    return config
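Note that POSIX truncates exit statuses to an unsigned byte, so the exit(-3) above is reported to the shell as 253. A quick sketch to observe this:

import sys

sys.exit(-3)  # afterwards in a POSIX shell: echo $?  ->  253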
Example #3
    def __init__(self):

        self.jobs = []

        __res = Scheduler.create_collection()
        if type(__res) is Error:
            print("Unable to create a db connection.")
            exit(1)  # nonzero status so callers can detect the failed db connection

        self.__create_all_pre_req_directories()
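If the goal on failure is a message plus a failing status, sys.exit accepts the message directly; a sketch:

import sys

# prints the message to stderr and exits with status 1
sys.exit("Unable to create a db connection.")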
Example #4
def daemon_run():
    parser = argparse.ArgumentParser("gps2nextcloud")
    parser.add_argument("--create-daemon", help="create systemd service file /etc/systemd/system/gps2nextcloud.service",
                        action='store_true')
    parser.add_argument("--create-config", help="create initial configuration file", action='store_true')
    parser.add_argument("--config_file", help="path to configuration file", required=False)
    args = parser.parse_args()
    if args.config_file:
        run(args.config_file)
    else:
        exit(-5)
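The exit(-5) above hand-rolls an argparse usage failure; parser.error does the same job, printing the usage text to stderr and exiting with status 2 (Example #12 below takes that route). A sketch with a hypothetical message:

import argparse

parser = argparse.ArgumentParser("gps2nextcloud")
parser.add_argument("--config_file", help="path to configuration file", required=False)
args = parser.parse_args()
if not args.config_file:
    parser.error("--config_file is required to run")  # usage to stderr, exit status 2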
Example #5
def exit(*args, **kwargs):
    """Exit the sketch.

    `exit()` overrides Python's builtin exit() function and makes sure
    that necessary cleanup steps are performed before exiting the
    sketch.

    :param args: positional arguments to pass to Python's builtin
        `exit()` function.

    :param kwargs: keyword-arguments to pass to Python's builtin
        `exit()` function.
    """
    default_sketch.show(visible=False)
    app.quit()
    builtins.exit(*args, **kwargs)
Example #6
def exit(*args, **kwargs):
    """Exit the sketch.

    `exit()` overrides Python's builtin exit() function and makes sure
    that necessary cleanup steps are performed before exiting the
    sketch.

    :param args: positional arguments to pass to Python's builtin
        `exit()` function.

    :param kwargs: keyword-arguments to pass to Python's builtin
        `exit()` function.

    """
    pyglet.app.exit()
    builtins.exit(*args, **kwargs)
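Examples #5 and #6 bolt cleanup onto exit() by shadowing the builtin; the standard-library alternative is atexit, which runs handlers on any normal interpreter shutdown. A minimal sketch, with _cleanup standing in for whatever teardown the sketch framework needs:

import atexit

def _cleanup():
    print("closing windows, stopping the event loop...")  # placeholder cleanup

atexit.register(_cleanup)
exit(0)  # _cleanup runs on any normal exit, including sys.exit()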
Example #7
def main(args):
    global failure
    global success

    for region in args.regions:
        print("Starting work for region %s" % region)
        vpcrelated = prepare_vpc(region)
        subnets = prepare_subnets(vpcrelated, region)
        sgs = prepare_test_sg(vpcrelated["vpc_id"], region)
        parent = os.getppid()
        num_parallel = args.numparallel if args.numparallel else 1
        extra_args = {
            "templateURL": args.templateURL,
            "cookbookURL": args.cookbookURL,
            "batchTemplateURL": args.batchTemplateURL,
        }

        case_list = ["useBadMT", "createAll", "createMT", "useGoodMT"]
        distro_list = args.distros if args.distros else [
            "alinux", "centos6", "centos7", "ubuntu1404", "ubuntu1604"
        ]

        work_queues = {}
        for distro in distro_list:
            if args.keyname:
                prepare_testfiles(distro, vpcrelated["vpc_id"], subnets,
                                  args.keyname, region, extra_args)
            else:
                prepare_testfiles(distro, vpcrelated["vpc_id"], subnets,
                                  "id_rsa", region, extra_args)
            work_queues[distro] = Queue.Queue()
            for case in case_list:
                work_item = {"distro": distro, "case": case}
                work_queues[distro].put(work_item)

        for distro in distro_list:
            for i in range(num_parallel):
                t = threading.Thread(target=test_runner,
                                     args=(work_queues[distro], subnets, sgs,
                                           region))
                t.daemon = True
                t.start()

        all_finished = False
        self_killed = False
        while not all_finished:
            time.sleep(1)
            all_finished = True
            for queue in work_queues.values():
                all_finished = all_finished and queue.unfinished_tasks == 0
            # In the case parent process was SIGKILL-ed
            if not _proc_alive(parent) and not self_killed:
                print("Parent process with pid %s died - terminating..." %
                      parent)
                _killme_gently()
                self_killed = True

        print("%s - Regions workers queues all done: %s" %
              (_time(), all_finished))
        print("Currently %s success and %s failure" % (success, failure))

        for distro in distro_list:
            clean_up_testfiles(distro, region)
        # print status...
        clean_up_resources(vpcrelated, subnets, sgs, region)

    print("%s success %s failure, expected %s success and %s failure" %
          (success, failure, 3 * len(args.distros) * len(args.regions),
           len(args.distros) * len(args.regions)))
    if failure != len(args.distros) * len(args.regions) or success != 3 * len(
            args.distros) * len(args.regions):
        exit(1)
Example #8
def main(args):
    global failure
    global success
    total_success = 0
    total_failure = 0

    for region in args.regions:
        print("Starting work for region %s" % region)
        failure = 0
        success = 0
        client = boto3.client("ec2", region_name=region)
        response = client.describe_tags(
            Filters=[{"Name": "key", "Values": ["ParallelClusterTestSubnet"]}],
            MaxResults=16)
        if not args.mastersubnet:
            if len(response["Tags"]) == 0:
                print(
                    "Could not find subnet in %s with ParallelClusterTestSubnet tag.  Aborting."
                    % (region))
                exit(1)
            subnetid = response["Tags"][0]["ResourceId"]
            if subnetid is None:
                print("Error: Subnet ID is None")

            response = client.describe_subnets(SubnetIds=[subnetid])
            if len(response) == 0:
                print("Could not find subnet info for %s" % (subnetid))
                exit(1)
            vpcid = response["Subnets"][0]["VpcId"]

            if vpcid is None:
                print("Error: VPC ID is None")

            print("VPCId: %s; SubnetId %s" % (vpcid, subnetid))
            setup[region] = {"vpc": vpcid, "subnet": subnetid}

        key_name = args.keyname
        parent = os.getppid()
        num_parallel = args.numparallel if args.numparallel else 1
        extra_args = {
            "templateURL": args.templateURL,
            "cookbookURL": args.cookbookURL,
            "vpc": args.vpcid if args.vpcid else setup[region]["vpc"],
            "master_subnet": args.mastersubnet if args.mastersubnet else setup[region]["subnet"],
            "region": region,
        }
        success_cluster_list = [
            "custom3Vol", "custom5Vol", "default", "custom1Vol"
        ]
        failure_cluster_list = ["custom6Vol"]
        distro_list = args.distros if args.distros else [
            "alinux", "centos6", "centos7", "ubuntu1404", "ubuntu1604"
        ]
        success_work_queues = {}
        failure_work_queues = {}
        for distro in distro_list:
            if key_name:
                prepare_testfiles(distro, key_name, extra_args, region)
            else:
                prepare_testfiles(distro, "id_rsa", extra_args, region)
            success_work_queues[distro] = Queue.Queue()
            failure_work_queues[distro] = Queue.Queue()
            for clustername in success_cluster_list:
                work_item = {"distro": distro, "clustername": clustername}
                success_work_queues[distro].put(work_item)
            for clustername in failure_cluster_list:
                work_item = {"distro": distro, "clustername": clustername}
                failure_work_queues[distro].put(work_item)

        for distro in distro_list:
            for i in range(num_parallel):
                t = threading.Thread(target=test_runner,
                                     args=(success_work_queues[distro],
                                           extra_args))
                t.daemon = True
                t.start()

        all_finished = False
        self_killed = False
        while not all_finished:
            time.sleep(1)
            all_finished = True
            for queue in success_work_queues.values():
                all_finished = all_finished and queue.unfinished_tasks == 0
            # In the case parent process was SIGKILL-ed
            if not _proc_alive(parent) and not self_killed:
                print("Parent process with pid %s died - terminating..." %
                      parent)
                _killme_gently()
                self_killed = True

        print("%s - Distributions workers queues all done: %s" %
              (_time(), all_finished))
        if success != 20 or failure != 0:
            print(
                "ERROR: expected 20 success 0 failure, got %s success %s failure"
                % (success, failure))
            exit(1)

        for distro in distro_list:
            for i in range(num_parallel):
                t = threading.Thread(target=test_runner,
                                     args=(failure_work_queues[distro],
                                           extra_args))
                t.daemon = True
                t.start()

        all_finished = False
        self_killed = False
        while not all_finished:
            time.sleep(1)
            all_finished = True
            for queue in failure_work_queues.values():
                all_finished = all_finished and queue.unfinished_tasks == 0
            # In the case parent process was SIGKILL-ed
            if not _proc_alive(parent) and not self_killed:
                print("Parent process with pid %s died - terminating..." %
                      parent)
                _killme_gently()
                self_killed = True

        print("%s - Distributions workers queues all done: %s" %
              (_time(), all_finished))
        if failure != 5:
            print("ERROR: expected 5 failure, %s failure" % failure)
            exit(1)

        for distro in distro_list:
            clean_up_testfiles(distro, region)
        # print status...

        print("Region %s test finished" % region)

        total_success += success
        total_failure += failure

    print("Expected %s success and %s failure, got %s success and %s failure" %
          (20 * len(args.regions), 5 * len(args.regions), total_success,
           total_failure))
    if total_success == 20 * len(args.regions) and total_failure == 5 * len(
            args.regions):
        print("Test finished")
    else:
        print("FAILURE!")
Example #9
def exit(code: int = 0):
    import builtins
    builtins.exit(code)
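Worth knowing: builtins.exit and quit only exist because the site module injects them at startup (python -S leaves them undefined), so sys.exit is the dependable spelling in scripts:

import sys

sys.exit(0)  # always available; raises SystemExit(0)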
Example #10
def _main_child():
    _bind_signals_child()
    parent = os.getppid()
    print("Parent pid: %s" % parent)
    config = {
        "parallelism": 3,
        "regions": "us-east-1,us-east-2,us-west-1,us-west-2," +
                   "ca-central-1,eu-west-1,eu-west-2,eu-central-1," +
                   "ap-southeast-1,ap-southeast-2,ap-northeast-1," +
                   "ap-south-1,sa-east-1,eu-west-3",
        "distros": "alinux,centos6,centos7,ubuntu1404,ubuntu1604",
        "schedulers": "sge,slurm,torque",
        "instance_types": "c4.xlarge",
        "key_path": "",
        "custom_node_url": None,
        "custom_cookbook_url": None,
        "custom_template_url": None,
        "expected_asg_capacity_min": 0,
        "expected_asg_capacity_max": 3,
        "expected_compute_nodes_min": 0,
        "expected_compute_nodes_max": 3,
    }

    parser = argparse.ArgumentParser(
        description="Test runner for AWS ParallelCluster")
    parser.add_argument("--parallelism",
                        help="Number of tests per region to run in parallel",
                        type=int)
    parser.add_argument("--regions",
                        help="Comma separated list of regions to test",
                        type=str)
    parser.add_argument("--distros",
                        help="Comma separated list of distributions to test",
                        type=str)
    parser.add_argument("--schedulers",
                        help="Comma separated list of schedulers to test",
                        type=str)
    parser.add_argument(
        "--instance-types",
        type=str,
        help="Comma separated list of instance types to use for both Master and Compute nodes",
    )
    parser.add_argument("--key-name",
                        help="Key Pair to use for EC2 instances",
                        type=str,
                        required=True)
    parser.add_argument("--key-path",
                        help="Key path to use for SSH connections",
                        type=str)
    parser.add_argument(
        "--custom-node-url",
        help="S3 URL to a custom aws-parallelcluster-node package",
        type=str)
    parser.add_argument(
        "--custom-cookbook-url",
        help="S3 URL to a custom aws-parallelcluster-cookbook package",
        type=str)
    parser.add_argument(
        "--custom-template-url",
        help="S3 URL to a custom aws-parallelcluster CloudFormation template",
        type=str)
    parser.add_argument(
        "--expected-asg-capacity-min",
        help="Expected number of nodes in the asg after scale-down",
        type=int)
    parser.add_argument(
        "--expected-asg-capacity-max",
        help="Expected number of nodes in the asg after scale-up",
        type=int)
    parser.add_argument(
        "--expected-compute-nodes-min",
        help="Expected number of nodes in the scheduler after scale-down",
        type=int)
    parser.add_argument(
        "--expected-compute-nodes-max",
        help="Expected number of nodes in the scheduler after scale-up",
        type=int)

    for key, value in vars(parser.parse_args()).items():  # iteritems() is Python 2 only
        if value is not None:
            config[key] = value

    region_list = config["regions"].split(",")
    distro_list = config["distros"].split(",")
    scheduler_list = config["schedulers"].split(",")
    instance_type_list = config["instance_types"].split(",")
    expected_asg_capacity = (config["expected_asg_capacity_min"],
                             config["expected_asg_capacity_max"])
    expected_compute_nodes = (config["expected_compute_nodes_min"],
                              config["expected_compute_nodes_max"])

    print("==> Regions: %s" % (", ".join(region_list)))
    print("==> Instance Types: %s" % (", ".join(instance_type_list)))
    print("==> Distros: %s" % (", ".join(distro_list)))
    print("==> Schedulers: %s" % (", ".join(scheduler_list)))
    print("==> Parallelism: %d" % (config["parallelism"]))
    print("==> Key Pair: %s" % (config["key_name"]))
    print("==> Expected asg capacity: min=%d, max=%d " % expected_asg_capacity)
    print("==> Expected compute nodes: min=%d, max=%d " %
          expected_compute_nodes)

    # Optional params
    if config["key_path"]:
        print("==> Key Path: %s" % (config["key_path"]))
    if config["custom_cookbook_url"]:
        print("==> Custom aws-parallelcluster-cookbook URL: %s" %
              (config["custom_cookbook_url"]))
    if config["custom_node_url"]:
        print("==> Custom aws-parallelcluster-node URL: %s" %
              (config["custom_node_url"]))
    if config["custom_template_url"]:
        print("==> Custom aws-parallelcluster template URL: %s" %
              (config["custom_template_url"]))

    # Populate subnet / vpc data for all regions we're going to test.
    for region in region_list:
        client = boto3.client("ec2", region_name=region)
        response = client.describe_tags(Filters=[{
            "Name":
            "key",
            "Values": ["ParallelClusterTestSubnet"]
        }],
                                        MaxResults=16)
        if len(response["Tags"]) == 0:
            print(
                "Could not find subnet in %s with ParallelClusterTestSubnet tag.  Aborting."
                % (region))
            exit(1)
        subnetid = response["Tags"][0]["ResourceId"]

        response = client.describe_subnets(SubnetIds=[subnetid])
        if len(response) == 0:
            print("Could not find subnet info for %s" % (subnetid))
            exit(1)
        vpcid = response["Subnets"][0]["VpcId"]

        setup[region] = {"vpc": vpcid, "subnet": subnetid}

    work_queues = {}
    # build up a per-region list of work to do
    for region in region_list:
        work_queues[region] = Queue.Queue()
        for distro in distro_list:
            for scheduler in scheduler_list:
                for instance in instance_type_list:
                    work_item = {
                        "distro": distro,
                        "scheduler": scheduler,
                        "instance_type": instance,
                        "expected_asg_capacity": expected_asg_capacity,
                        "expected_compute_nodes": expected_compute_nodes,
                    }
                    work_queues[region].put(work_item)

    # start all the workers
    for region in region_list:
        for i in range(0, config["parallelism"]):
            t = threading.Thread(target=test_runner,
                                 args=(region, work_queues[region],
                                       config["key_name"], config))
            t.daemon = True
            t.start()

    # Wait for all the work queues to be completed in each region
    # WARN: The work_queues[region].join() approach prevents the SIGINT signal to be caught from the main thread,
    #       that is actually blocked in the join.
    all_finished = False
    self_killed = False
    while not all_finished:
        time.sleep(1)
        all_finished = True
        for queue in work_queues.values():
            all_finished = all_finished and queue.unfinished_tasks == 0
        # In the case parent process was SIGKILL-ed
        if not _proc_alive(parent) and not self_killed:
            print("Parent process with pid %s died - terminating..." % parent)
            _killme_gently()
            self_killed = True

    print("%s - Regions workers queues all done: %s" % (_time(), all_finished))

    # print status...
    print("==> Success: %d" % (success))
    print("==> Failure: %d" % (failure))
    if failure != 0:
        exit(1)
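The WARN comment above is the reason every example here polls unfinished_tasks instead of calling Queue.join(): join() blocks the main thread, so SIGINT is never handled. The same wait written as a helper, a sketch using the Python 3 queue module:

import queue
import time

def wait_all(queues, poll=1.0):
    # Poll until no queue has unfinished tasks, keeping the main
    # thread responsive to signals such as SIGINT.
    while any(q.unfinished_tasks for q in queues):
        time.sleep(poll)

q = queue.Queue()
wait_all([q])  # returns immediately: nothing was queued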
Example #11
def _main_child():
    _bind_signals_child()
    parent = os.getppid()
    print("Parent pid: %s" % parent)
    config = {
        'parallelism': 3,
        'regions': 'us-east-1,us-east-2,us-west-1,us-west-2,' +
                   'ca-central-1,eu-west-1,eu-west-2,eu-central-1,' +
                   'ap-southeast-1,ap-southeast-2,ap-northeast-1,' +
                   'ap-south-1,sa-east-1,eu-west-3',
        'distros': 'alinux,centos6,centos7,ubuntu1404,ubuntu1604',
        'schedulers': 'sge,slurm,torque',
        'instance_types': 'c4.xlarge',
        'key_path': '',
        'custom_node_url': None,
        'custom_cookbook_url': None,
        'custom_template_url': None
    }

    parser = argparse.ArgumentParser(
        description='Test runner for AWS ParallelCluster')
    parser.add_argument('--parallelism',
                        help='Number of tests per region to run in parallel',
                        type=int,
                        default=3)
    parser.add_argument('--regions',
                        help='Comma separated list of regions to test',
                        type=str)
    parser.add_argument('--distros',
                        help='Comma separated list of distributions to test',
                        type=str)
    parser.add_argument('--schedulers',
                        help='Comma separated list of schedulers to test',
                        type=str)
    parser.add_argument(
        '--instance-types',
        type=str,
        help='Comma separated list of instance types to use for both Master and Compute nodes')
    parser.add_argument('--key-name',
                        help='Key Pair to use for EC2 instances',
                        type=str,
                        required=True)
    parser.add_argument('--key-path',
                        help='Key path to use for SSH connections',
                        type=str)
    parser.add_argument(
        '--custom-node-url',
        help='S3 URL to a custom aws-parallelcluster-node package',
        type=str)
    parser.add_argument(
        '--custom-cookbook-url',
        help='S3 URL to a custom aws-parallelcluster-cookbook package',
        type=str)
    parser.add_argument(
        '--custom-template-url',
        help='S3 URL to a custom AWS ParallelCluster CloudFormation template',
        type=str)

    for key, value in vars(parser.parse_args()).items():  # iteritems() is Python 2 only
        if value is not None:
            config[key] = value

    region_list = config['regions'].split(',')
    distro_list = config['distros'].split(',')
    scheduler_list = config['schedulers'].split(',')
    instance_type_list = config['instance_types'].split(',')

    print("==> Regions: %s" % (', '.join(region_list)))
    print("==> Instance Types: %s" % (', '.join(instance_type_list)))
    print("==> Distros: %s" % (', '.join(distro_list)))
    print("==> Schedulers: %s" % (', '.join(scheduler_list)))
    print("==> Parallelism: %d" % (config['parallelism']))
    print("==> Key Pair: %s" % (config['key_name']))

    # Optional params
    if config['key_path']:
        print("==> Key Path: %s" % (config['key_path']))
    if config['custom_cookbook_url']:
        print("==> Custom aws-parallelcluster-cookbook URL: %s" %
              (config['custom_cookbook_url']))
    if config['custom_node_url']:
        print("==> Custom aws-parallelcluster-node URL: %s" %
              (config['custom_node_url']))
    if config['custom_template_url']:
        print("==> Custom AWS ParallelCluster template URL: %s" %
              (config['custom_template_url']))

    # Populate subnet / vpc data for all regions we're going to test.
    for region in region_list:
        client = boto3.client('ec2', region_name=region)
        response = client.describe_tags(Filters=[{
            'Name':
            'key',
            'Values': ['ParallelClusterTestSubnet']
        }],
                                        MaxResults=16)
        if len(response['Tags']) == 0:
            print(
                'Could not find subnet in %s with ParallelClusterTestSubnet tag.  Aborting.'
                % (region))
            exit(1)
        subnetid = response['Tags'][0]['ResourceId']

        response = client.describe_subnets(SubnetIds=[subnetid])
        if len(response) == 0:
            print('Could not find subnet info for %s' % (subnetid))
            exit(1)
        vpcid = response['Subnets'][0]['VpcId']

        setup[region] = {'vpc': vpcid, 'subnet': subnetid}

    work_queues = {}
    # build up a per-region list of work to do
    for region in region_list:
        work_queues[region] = Queue.Queue()
        for distro in distro_list:
            for scheduler in scheduler_list:
                for instance in instance_type_list:
                    work_item = {
                        'distro': distro,
                        'scheduler': scheduler,
                        'instance_type': instance
                    }
                    work_queues[region].put(work_item)

    # start all the workers
    for region in region_list:
        for i in range(0, config['parallelism']):
            t = threading.Thread(target=test_runner,
                                 args=(region, work_queues[region],
                                       config['key_name'], config))
            t.daemon = True
            t.start()

    # Wait for all the work queues to be completed in each region
    # WARN: The work_queues[region].join() approach prevents the SIGINT signal to be caught from the main thread,
    #       that is actually blocked in the join.
    all_finished = False
    self_killed = False
    while not all_finished:
        time.sleep(1)
        all_finished = True
        for queue in work_queues.values():
            all_finished = all_finished and queue.unfinished_tasks == 0
        # In the case parent process was SIGKILL-ed
        if not _proc_alive(parent) and not self_killed:
            print("Parent process with pid %s died - terminating..." % parent)
            _killme_gently()
            self_killed = True

    print("%s - Regions workers queues all done: %s" % (_time(), all_finished))

    # print status...
    print("==> Success: %d" % (success))
    print("==> Failure: %d" % (failure))
    if failure != 0:
        exit(1)
Example #12

def main():
    parser = argparse.ArgumentParser("gps2nextcloud")
    parser.add_argument(
        "--create-daemon",
        help=
        "create systemd service file /etc/systemd/system/gps2nextcloud.service",
        action='store_true')
    parser.add_argument("--create-config",
                        help="create initial configuration file",
                        action='store_true')
    parser.add_argument("--config_file",
                        help="path to configuration file",
                        required=False)
    args = parser.parse_args()
    if args.create_daemon:
        create_daemon()
    elif args.create_config:
        create_config(args.config_file)
    elif args.config_file:
        run(args.config_file)
    else:
        parser.error('need arguments')


if __name__ == '__main__':
    main()
    exit(0)  # inside the guard, so importing this module does not exit the interpreter
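A common variant returns the status from main() and hands it to sys.exit inside the guard, so the exit code follows the outcome instead of being hard-wired; a sketch:

import sys

def main():
    # ... parse args and dispatch as above ...
    return 0

if __name__ == '__main__':
    sys.exit(main())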
Example #13
import builtins

builtins.copyright()
builtins.credits()
builtins.license()
print(builtins.object)         # a class, not a function: just display it
print(list(builtins.zip()))    # zip() with no arguments is an empty iterator: []
builtins.exec("print('hi')")   # exec() needs a source string; calling it bare raises TypeError

builtins.quit()                # quit() and exit() terminate the interpreter,
builtins.exit()                # so they must come last - nothing below them runs
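To enumerate everything the builtins module exposes, including the exit and quit helpers the site module adds, dir() is enough:

import builtins

print([name for name in dir(builtins) if not name.startswith('_')])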
Example #14
def main(args):
    global failure
    global success
    total_success = 0
    total_failure = 0

    for region in args.regions:
        print("Starting work for region %s" % region)
        failure = 0
        success = 0
        client = boto3.client('ec2', region_name=region)
        response = client.describe_tags(Filters=[{'Name': 'key',
                                                  'Values': [ 'ParallelClusterTestSubnet' ]}],
                                        MaxResults=16)
        if not args.mastersubnet:
            if len(response['Tags']) == 0:
                print('Could not find subnet in %s with ParallelClusterTestSubnet tag.  Aborting.' %
                      (region))
                exit(1)
            subnetid = response['Tags'][0]['ResourceId']
            if subnetid is None:
                print("Error: Subnet ID is None")

            response = client.describe_subnets(SubnetIds=[subnetid])
            if len(response) == 0:
                print('Could not find subnet info for %s' % (subnetid))
                exit(1)
            vpcid = response['Subnets'][0]['VpcId']

            if vpcid is None:
                print("Error: VPC ID is None")

            print("VPCId: %s; SubnetId %s" % (vpcid, subnetid))
            setup[region] = {'vpc': vpcid, 'subnet': subnetid}

        key_name = args.keyname
        parent = os.getppid()
        numParallel = args.numparallel if args.numparallel else 1
        extra_args = {'templateURL': args.templateURL,
                      'cookbookURL': args.cookbookURL,
                      'vpc': args.vpcid if args.vpcid else setup[region]['vpc'],
                      'master_subnet': args.mastersubnet if args.mastersubnet else setup[region]['subnet'],
                      'region' : region}
        success_cluster_list = ['custom5Vol', 'custom3Vol', 'default', 'custom1Vol']
        failure_cluster_list = ['custom6Vol']
        distro_list = args.distros if args.distros else ['alinux', 'centos6', 'centos7', 'ubuntu1404', 'ubuntu1604']
        success_work_queues = {}
        failure_work_queues = {}
        for distro in distro_list:
            if key_name:
                prepare_testfiles(distro, key_name, extra_args)
            else:
                prepare_testfiles(distro, 'id_rsa', extra_args)
            success_work_queues[distro] = Queue.Queue()
            failure_work_queues[distro] = Queue.Queue()
            for clustername in success_cluster_list:
                work_item = {'distro': distro, 'clustername': clustername}
                success_work_queues[distro].put(work_item)
            for clustername in failure_cluster_list:
                work_item = {'distro': distro, 'clustername': clustername}
                failure_work_queues[distro].put(work_item)

        for distro in distro_list:
            for i in range(numParallel):
                t = threading.Thread(target=test_runner, args=(success_work_queues[distro], extra_args))
                t.daemon = True
                t.start()

        all_finished = False
        self_killed = False
        while not all_finished:
            time.sleep(1)
            all_finished = True
            for queue in success_work_queues.values():
                all_finished = all_finished and queue.unfinished_tasks == 0
            # In the case parent process was SIGKILL-ed
            if not _proc_alive(parent) and not self_killed:
                print("Parent process with pid %s died - terminating..." % parent)
                _killme_gently()
                self_killed = True

        print("%s - Distributions workers queues all done: %s" % (_time(), all_finished))
        if success != 20 or failure != 0:
            print("ERROR: expected 20 success 0 failure, got %s success %s failure" % (success, failure))
            exit(1)

        for distro in distro_list:
            for i in range(numParallel):
                # pass extra_args too, matching the success runners above
                t = threading.Thread(target=test_runner, args=(failure_work_queues[distro], extra_args))
                t.daemon = True
                t.start()

        all_finished = False
        self_killed = False
        while not all_finished:
            time.sleep(1)
            all_finished = True
            for queue in failure_work_queues.values():
                all_finished = all_finished and queue.unfinished_tasks == 0
            # In the case parent process was SIGKILL-ed
            if not _proc_alive(parent) and not self_killed:
                print("Parent process with pid %s died - terminating..." % parent)
                _killme_gently()
                self_killed = True

        print("%s - Distributions workers queues all done: %s" % (_time(), all_finished))
        if failure != 5:
            print("ERROR: expected 5 failure, %s failure" % failure)
            exit(1)

        for distro in distro_list:
            clean_up_testfiles(distro)
        # print status...

        print("Region %s test finished" % region)

        total_success += success
        total_failure += failure

    print("Expected %s success and %s failure, got %s success and %s failure"
          % (20*len(args.regions), 5*len(args.regions), total_success, total_failure))
    if total_success == 20*len(args.regions) and total_failure == 5*len(args.regions):
        print("Test finished")
    else:
        print("FAILURE!")
Example #15
def exit(*args, **kwargs):
    """Override the system exit to make sure we perform necessary
        cleanups, etc.
    """
    pyglet.app.exit()
    builtins.exit(*args, **kwargs)
Example #16
            sleep(30)
            continue
    raise RuntimeError("Could not establish ssh connection (or configure) " + ip)

def ConfigureMachine(ip, jsonFileName):
    initialJson = '''{
        "DigitalOcean" : {
            "Client ID"     : "None",
            "API Key"       : "None",
            "location"      : "None",
            "image"         : "None",
            "size"          : "None"
        },
        "BaseHostName": "None"
    }'''
    Env(initialJson, ".dynamicMachine", "DYNAMIC_MACHINE_CONFIG")
    configureNode(ip, jsonFileName)

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Configure a machine.')
    parser.add_argument('--ip', help='The IP address of the machine.', required=True)
    parser.add_argument('--jsonFile', help='The filename of the JSON file containing the list of commands.', required=False)
    args = parser.parse_args()
    try:
        ConfigureMachine(args.ip, args.jsonFile)
        exit(0)
    except Exception as e:
        print(str(e))
        exit(1)
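Catching Exception and calling exit(1) drops the traceback, which makes remote configuration failures hard to debug; printing it first keeps the diagnostics. A sketch reusing the names from the example above:

import sys
import traceback

try:
    ConfigureMachine(args.ip, args.jsonFile)
except Exception:
    traceback.print_exc()  # full traceback to stderr
    sys.exit(1)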