Example #1
def sync_down_folder(path):
    """Recursively sync a folder from S3 into the local log dir (or /tmp inside Docker)."""
    is_docker = os.path.isfile("/.dockerenv")
    if is_docker:
        local_path = "/tmp/%s" % (path)
    else:
        local_path = "%s/%s" % (LOCAL_LOG_DIR, path)

    local_dir = os.path.dirname(local_path)
    os.makedirs(local_dir, exist_ok=True)

    if is_docker:
        # Export credentials so the aws CLI call below can authenticate
        from doodad.ec2.autoconfig import AUTOCONFIG

        os.environ["AWS_ACCESS_KEY_ID"] = AUTOCONFIG.aws_access_key()
        os.environ["AWS_SECRET_ACCESS_KEY"] = AUTOCONFIG.aws_access_secret()

    full_s3_path = os.path.join(AWS_S3_PATH, path)
    bucket_name, bucket_relative_path = split_s3_full_path(full_s3_path)
    command = "aws s3 sync s3://%s/%s %s" % (
        bucket_name,
        bucket_relative_path,
        local_path,
    )
    print(command)
    stream = os.popen(command)
    output = stream.read()
    print(output)
    return local_path
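A minimal usage sketch for the function above; the experiment path is hypothetical, and AWS_S3_PATH / LOCAL_LOG_DIR are module-level constants assumed to be configured elsewhere:

local_dir = sync_down_folder("exp/run-1")  # hypothetical S3-relative path
print("synced to", local_dir)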
Example #2
def sync_down(path, check_exists=True):
    """Download a single file from S3 to the local log dir, caching it; returns None on failure."""
    is_docker = os.path.isfile("/.dockerenv")
    if is_docker:
        local_path = "/tmp/%s" % (path)
    else:
        local_path = "%s/%s" % (LOCAL_LOG_DIR, path)

    if check_exists and os.path.isfile(local_path):
        return local_path

    local_dir = os.path.dirname(local_path)
    os.makedirs(local_dir, exist_ok=True)

    if is_docker:
        # Export credentials so boto3 can authenticate inside the container
        from doodad.ec2.autoconfig import AUTOCONFIG
        os.environ["AWS_ACCESS_KEY_ID"] = AUTOCONFIG.aws_access_key()
        os.environ["AWS_SECRET_ACCESS_KEY"] = AUTOCONFIG.aws_access_secret()

    full_s3_path = os.path.join(AWS_S3_PATH, path)
    bucket_name, bucket_relative_path = split_s3_full_path(full_s3_path)
    try:
        bucket = boto3.resource('s3').Bucket(bucket_name)
        bucket.download_file(bucket_relative_path, local_path)
    except Exception as e:
        local_path = None
        print("Failed to sync! path: ", path)
        print("Exception: ", e)
    return local_path
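A hedged usage sketch: with check_exists=True (the default), a repeat call short-circuits to the cached local file instead of hitting S3 again. The file path is hypothetical:

params_path = sync_down("exp/run-1/params.pkl")   # first call downloads from S3
if params_path is None:
    print("download failed")
params_path = sync_down("exp/run-1/params.pkl")   # second call returns the cached copy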
Example #3
def _collect_instances(region):
    """Return running EC2 instances in `region` belonging to the configured security group."""
    client = boto3.client(
        "ec2",
        region_name=region,
        aws_access_key_id=AUTOCONFIG.aws_access_key(),
        aws_secret_access_key=AUTOCONFIG.aws_access_secret(),
    )
    print("Collecting instances in region", region)
    # Note: this keeps only the first instance of each reservation
    instances = [x['Instances'][0] for x in client.describe_instances(
        Filters=[
            {
                'Name': 'instance.group-name',
                'Values': [
                    AUTOCONFIG.aws_security_groups()[0],
                ]
            },
            {
                'Name': 'instance-state-name',
                'Values': [
                    'running'
                ]
            }
        ]
    )['Reservations']]
    for instance in instances:
        instance['Region'] = region
    return instances
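A sketch of how this per-region collector might be aggregated across all regions; collect_all_instances is a hypothetical name, assuming REGIONS is the list of region names used in the next example:

def collect_all_instances():
    # Concatenate running instances from every configured region
    instances = []
    for region in REGIONS:
        instances.extend(_collect_instances(region))
    return instances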
Example #4
def get_clients():
    """Create one boto3 EC2 client per region in REGIONS, tagging each with its region name."""
    regions = REGIONS
    clients = []
    for region in regions:
        client = boto3.client(
            "ec2",
            region_name=region,
            aws_access_key_id=AUTOCONFIG.aws_access_key(),
            aws_secret_access_key=AUTOCONFIG.aws_access_secret(),
        )
        client.region = region
        clients.append(client)
    return clients
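A short usage sketch using the standard boto3 describe_instances call; the running-instance count is illustrative and not part of the original module:

for client in get_clients():
    reservations = client.describe_instances(
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]
    )['Reservations']
    count = sum(len(r['Instances']) for r in reservations)
    print(client.region, count)  # uses the region attribute attached in get_clients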
Example #5
    def __init__(self,
            region='us-west-1',
            s3_bucket=None,
            image_id=None,
            aws_key_name=None,
            iam_profile=None,
            **kwargs
            ):
        # Fill in unspecified settings from the doodad autoconfig file
        from doodad.ec2.autoconfig import AUTOCONFIG
        from doodad.ec2.credentials import AWSCredentials
        s3_bucket = AUTOCONFIG.s3_bucket() if s3_bucket is None else s3_bucket
        image_id = AUTOCONFIG.aws_image_id(region) if image_id is None else image_id
        aws_key_name = AUTOCONFIG.aws_key_name(region) if aws_key_name is None else aws_key_name
        iam_profile = AUTOCONFIG.iam_profile_name() if iam_profile is None else iam_profile
        credentials = AWSCredentials(aws_key=AUTOCONFIG.aws_access_key(), aws_secret=AUTOCONFIG.aws_access_secret())
        security_group_ids = AUTOCONFIG.aws_security_group_ids()[region]
        security_groups = AUTOCONFIG.aws_security_groups()

        super(EC2AutoconfigDocker, self).__init__(
                s3_bucket=s3_bucket,
                image_id=image_id,
                aws_key_name=aws_key_name,
                iam_instance_profile_name=iam_profile,
                credentials=credentials,
                region=region,
                security_groups=security_groups,
                security_group_ids=security_group_ids,
                **kwargs
                )
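A hypothetical instantiation sketch: arguments left as None fall back to the autoconfig file; instance_type is assumed to be a kwarg handled by the parent class and is shown only for illustration:

mode = EC2AutoconfigDocker(
    region='us-west-1',
    instance_type='c4.xlarge',  # assumed parent-class kwarg, forwarded via **kwargs
)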
Example #6
def kill_f(pattern):
    """Terminate, after confirmation, every instance whose Name tag contains `pattern`."""
    print("Trying to kill instances matching pattern:", pattern)
    to_kill = []
    to_kill_ids = {}
    for instance in get_all_instances():
        name = get_name_tag(instance)
        if name is None or pattern in name:
            instance_id = instance['InstanceId']
            region = instance['Region']
            if name is None:
                if any([x['GroupName'] in AUTOCONFIG.aws_security_groups() for x in instance['SecurityGroups']]):
                    if query_yes_no(question="Kill instance {} without name in region {} (security groups {})?".format(
                            instance_id, region, [x['GroupName'] for x in instance['SecurityGroups']])):
                        name = instance_id
            if name:
                if region not in to_kill_ids:
                    to_kill_ids[region] = []
                to_kill_ids[region].append(instance_id)
                to_kill.append(name)

    print("This will kill the following jobs:")
    print(", ".join(sorted(to_kill)))
    if query_yes_no(question="Proceed?", default="no"):
        for client in get_clients():
            print("Terminating instances in region", client.region)
            ids = to_kill_ids.get(client.region, [])
            if len(ids) > 0:
                client.terminate_instances(InstanceIds=ids)
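A one-line usage sketch (the pattern is hypothetical); nothing is terminated until the final "Proceed?" prompt is confirmed:

kill_f("exp-42")  # offers to kill every instance whose Name tag contains "exp-42"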
Example #7
    def __init__(self, s3_path, s3_bucket=None, sync_interval=15, output=False,
                 include_types=('*.txt', '*.csv', '*.json', '*.gz', '*.tar', '*.log', '*.pkl'), **kwargs):
        super(MountS3, self).__init__(**kwargs)
        if s3_bucket is None:
            # load bucket name from the doodad config
            from doodad.ec2.autoconfig import AUTOCONFIG
            s3_bucket = AUTOCONFIG.s3_bucket()
        self.s3_bucket = s3_bucket
        self.s3_path = s3_path
        self.output = output
        self.sync_interval = sync_interval
        self.sync_on_terminate = True
        self.include_types = include_types
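A hypothetical construction sketch; mount_point is assumed to be a kwarg consumed by the Mount base class, and the S3 prefix is made up:

mount = MountS3(
    s3_path='logs/run-1',         # hypothetical prefix under the configured bucket
    mount_point='/tmp/outputs',   # assumed base-class kwarg, passed via **kwargs
    output=True,                  # treat this as an output mount synced back to S3
)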
Example #8
def ssh(job):
    """Open an interactive SSH session to the instance whose Name tag exactly matches `job`."""
    for instance in get_all_instances():
        name = get_name_tag(instance)
        if name == job:
            ip_addr = instance['PublicIpAddress']
            key_path = AUTOCONFIG.aws_key_path(instance['Region'])
            command = " ".join([
                "ssh",
                "-oStrictHostKeyChecking=no",
                "-oConnectTimeout=10",
                "-i",
                key_path,
                "-t",
                "ubuntu@" + ip_addr,
            ])
            print(command)
            os.system(command)
            return
    print("Not found!")