Example #1
def destroy_droplets():
    """ Destroy the droplets - node-1, node-2, node-3 """
    manager = Manager(token=DIGITAL_OCEAN_ACCESS_TOKEN)
    droplets = manager.get_all_droplets()
    for num in range(3):
        node = f'node-{num + 1}'
        droplets = manager.get_all_droplets(tag_name=node)
        for droplet in droplets:
            droplet.destroy()
        print(f'{node} has been destroyed.')
Example #2
def get_roles(client_id=None, api_key=None, blacklist=None, ssh_port=22):
    ip_blacklist = blacklist or []
    client_id = client_id or os.getenv("DO_CLIENT_ID")
    api_key = api_key or os.getenv("DO_API_KEY")

    if not client_id or not api_key:
        print("You have to provide the client ID and API key for Digital "
              "Ocean. Set DO_CLIENT_ID and DO_API_KEY environment variables.")
        sys.exit(28)

    if not env.server_name_regex:
        env.server_name_regex = re.compile(r'(?P<role>.+)')

    if not env.server_format:
        env.server_format = "{ip}:{port}"

    # Retrieve the app server IPs from the DO API
    manager = Manager(client_id=client_id, api_key=api_key)

    roles = {}
    for droplet in manager.get_all_droplets():
        if droplet.ip_address in ip_blacklist:
            continue

        match = env.server_name_regex.match(droplet.name)
        if not match:
            continue

        roles.setdefault(match.group('role'), []).append(
            env.server_format.format(ip=droplet.ip_address, port=ssh_port))
    return roles
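To make the role grouping above concrete, here is a small standalone sketch of how a role-capturing regex sorts droplet names into roles. The pattern and droplet names below are invented for illustration and are not part of the original snippet (the snippet's default pattern, r'(?P<role>.+)', simply treats each full name as its own role).

import re

# Hypothetical naming scheme: 'web1', 'db1', ... where the alphabetic prefix is the role.
server_name_regex = re.compile(r'(?P<role>[a-z]+)\d*$')
droplet_names = ['web1', 'web2', 'db1', 'cache1']

roles = {}
for name in droplet_names:
    match = server_name_regex.match(name)
    if match:
        roles.setdefault(match.group('role'), []).append(name)

print(roles)  # {'web': ['web1', 'web2'], 'db': ['db1'], 'cache': ['cache1']}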
Example #3
def _get_droplet_by_name(
    name: str, manager: digitalocean.Manager, fail_if_missing=True
) -> digitalocean.Droplet:
    droplets = manager.get_all_droplets()
    droplet: digitalocean.Droplet = next((d for d in droplets if d.name == name), None)
    if not droplet and fail_if_missing:
        click.secho(f"No droplet found for name: {name}", fg="red")
        sys.exit(1)
    return droplet
Example #4
def get_first_droplet_by_tag(tag):
    token = load_config()['token']
    mngr = Manager(token=token)
    droplet = mngr.get_all_droplets(tag_name=tag)
    if droplet:
        droplet = droplet[0]
        print('droplet "%s"(%s): ip=%s' % (droplet.name, droplet.id, droplet.ip_address))
        return droplet
    return None
Example #5
def list(item_type):
    token = os.getenv('access_token')
    manager = Manager(token=token)
    if item_type == 'droplets':
        droplets = manager.get_all_droplets()
        result = []
        for droplet in droplets:
            result.append({'id': droplet.id, 'name': droplet.name,
                           'status': droplet.status})
        click.echo(json.dumps(result))
Example #6
def _get_current_droplets(manager: digitalocean.Manager) -> List[str]:
    """
    Get a list of the current droplets to make sure the same name will not
    be used twice.

    :return: List of current droplet names.
    """
    print("Gathering information on current Digital Ocean droplets...")
    droplets = manager.get_all_droplets()
    names = [x.name for x in droplets]
    return names
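A possible way to use the helper above to avoid reusing an existing droplet name — a minimal sketch; the token placeholder and the candidate name are assumptions, not part of the original snippet.

import digitalocean

# Hypothetical token and candidate name.
manager = digitalocean.Manager(token="<do-api-token>")
proposed_name = "worker-01"

if proposed_name in _get_current_droplets(manager):
    raise ValueError(f"A droplet named {proposed_name!r} already exists")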
Example #7
def get_addresses(ctx, type):
    """Get IP address"""
    manager = Manager(token=DIGITAL_OCEAN_ACCESS_TOKEN)
    if type == "master":
        droplet = manager.get_all_droplets(tag_name="node-1")
        print(droplet[0].ip_address)
        hosts.append(droplet[0].ip_address)
    elif type == "workers":
        for num in range(2, 4):
            node = f"node-{num}"
            droplet = manager.get_all_droplets(tag_name=node)
            print(droplet[0].ip_address)
            hosts.append(droplet[0].ip_address)
    elif type == "all":
        for num in range(3):
            node = f"node-{num + 1}"
            droplet = manager.get_all_droplets(tag_name=node)
            print(droplet[0].ip_address)
            hosts.append(droplet[0].ip_address)
    else:
        print('The "type" should be either "master", "workers", or "all".')
    print(f"Host addresses - {hosts}")
Example #8
def get_addresses(type):
    """ Get IP address """
    manager = Manager(token=DIGITAL_OCEAN_ACCESS_TOKEN)
    if type == 'master':
        droplet = manager.get_all_droplets(tag_name='node-1')
        print(droplet[0].ip_address)
        env.hosts.append(droplet[0].ip_address)
    elif type == 'workers':
        for num in range(2, 4):
            node = f'node-{num}'
            droplet = manager.get_all_droplets(tag_name=node)
            print(droplet[0].ip_address)
            env.hosts.append(droplet[0].ip_address)
    elif type == 'all':
        for num in range(3):
            node = f'node-{num + 1}'
            droplet = manager.get_all_droplets(tag_name=node)
            print(droplet[0].ip_address)
            env.hosts.append(droplet[0].ip_address)
    else:
        print('The "type" should be either "master", "workers", or "all".')
    print(f'Host addresses - {env.hosts}')
Example #9
def find_droplet_by_name(droplet_name: str, access_token: str) -> Droplet:
    """
    Find droplet by name. Finds the first droplet with a matching name,
    apparently regardless of project.

    :param droplet_name: The name of the droplet to find.
    :param access_token: The DigitalOcean API token to use.

    """
    manager = Manager(token=access_token)

    try:
        return next(droplet for droplet in manager.get_all_droplets()
                    if droplet.name == droplet_name)
    except StopIteration:
        raise DropletNotFound(f"Droplet {droplet_name} not found.") from None
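A sketch of how the helper above might be called; the environment variable name and the droplet name are placeholders, and DropletNotFound is assumed to be the exception class defined alongside the original function.

import os

try:
    droplet = find_droplet_by_name("staging-1", os.environ["DIGITALOCEAN_TOKEN"])
    print(droplet.ip_address)
except DropletNotFound as exc:
    print(exc)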
Example #10
def login():
    if request.args.get('code') is None:
        abort(404)
    # Get the code from the Chrome extension
    token = request.args.get('code')
    manager = Manager(token=token)

    # Instantiate the ``manager`` object to set up authentication for the DO API.
    my_droplets = manager.get_all_droplets()
    # Check for success
    print(my_droplets)

    user_ssh_key = request.args.get('ssh')
    key = SSHKey(token='bb7f9e5b82a17b7304efde1b9cd886fc329f09340fa172c3c27d890b099c25cb',
                 name='uniquehostname',
                 public_key=user_ssh_key)
    # Key is created successfully.
    key.create()
    return "Login Success"
Example #11
class DigitalDroplets:
    def __init__(self, token):
        print("[+] All the shit  will be delete !!! ")
        self.Manager = Manager(token=token)

    def delete_droplets(self):
        print('[+] Droplets : ')
        self.__delete(self.Manager.get_all_droplets())

    def delete_ssh(self):
        print('[+] SSH Keys : ')
        self.__delete(self.Manager.get_all_sshkeys())

    def delete_snapshots(self):
        print('[+] Snapshot : ')
        self.__delete(self.Manager.get_all_snapshots())

    @staticmethod
    def __delete(what_to_delete):
        for item in what_to_delete:
            print("     Deleting {} : {}".format(item.name, item.destroy()))
Example #12
def get_droplets(manager: Manager) -> list:
    return manager.get_all_droplets()
Example #13
def get_droplet_list():
    manager = Manager(token=DO_TOKEN)
    droplets = manager.get_all_droplets(tag_name=DO_TAGS)
    return droplets
Example #14
def get_droplets(self):
    """Get active droplets"""
    manager = Manager(token=self.api_token)
    my_droplets = manager.get_all_droplets()
    return my_droplets
Example #15
def get_droplet_status(node):
    """Given a droplet's tag name, return the status of the droplet"""
    manager = Manager(token=DIGITAL_OCEAN_ACCESS_TOKEN)
    droplet = manager.get_all_droplets(tag_name=node)
    return droplet[0].status
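One way the status helper above could be combined with a polling loop to wait for a droplet to come up; the tag name, interval, and retry count here are illustrative only.

import time

# Poll until the droplet tagged 'node-1' reports an 'active' status.
for attempt in range(30):
    if get_droplet_status('node-1') == 'active':
        print('node-1 is ready')
        break
    time.sleep(10)
else:
    raise TimeoutError('node-1 did not become active in time')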
Example #16
class DigitalOceanDeviceAPI(object):
    """
    A block device implementation for DigitalOcean block storage.

    The following limitations apply:

    - You need separate flocker clusters per region because volumes cannot be
      moved between regions.
    - Only five volumes can be attached to a droplet at any given time.
    - It is possible for multiple flocker clusters to coexist, but they must
      not share dataset IDs.

    :ivar six.text_type _cluster_id: ID of the cluster
    :ivar Manager _manager: The DO manager object
    :ivar Metadata _metadata: Metadata of the node running the agent (nullable)
    :ivar float _poll: Interval for polling state changes of actions, in seconds
    :ivar float _timeout: Maximum duration to wait for an action to complete
    """

    _ONE_GIB = int(GiB(1).to_Byte().value)  # This constant is used for the
    #                                         allocation unit

    _PREFIX = six.text_type("flocker-v1-")  # Prefix for volume IDs

    # We reassign the Volume and Action class as attributes to help
    # ergonomics in our test suite.
    Volume = Vol
    Action = Act

    def __init__(self, cluster_id, token):
        self._cluster_id = six.text_type(cluster_id)
        self._manager = Manager(token=token)
        self._metadata = None
        self._poll = 1
        self._timeout = 60

    @property
    def metadata(self):
        """The metadata of the node running the agent. Lazily resolved
        :return: The metadata object describing the node.
        :rtype: Metadata
        """
        if not self._metadata:
            self._metadata = Metadata()
        if not self._metadata.droplet_id:
            with start_action(action_type=six.text_type(
                    "flocker:node:agents:do:load_metadata")) as a:
                self._metadata.load()
                a.add_success_fields(
                    droplet_metadata={
                        'droplet_id': self._metadata.droplet_id,
                        'hostname': self._metadata.hostname,
                        'region': self._metadata.region
                    })
        return self._metadata

    @property
    def volume_description(self):
        """ Returns the description this flocker cluster should use

        :return: The cluster ID property string to use as a description
        :rtype: six.text_type
        """
        return six.text_type("flocker-v1-cluster-id: {cluster_id}").format(
            cluster_id=self._cluster_id)

    def allocation_unit(self):
        return self._ONE_GIB

    def compute_instance_id(self):
        return six.text_type(self.metadata.droplet_id)

    def _get_volume(self, blockdevice_id):
        """Return the DigitalOcean volume associated with this block device ID

        :param blockdevice_id: The block device ID to look up
        :return: A ``digitalocean.Volume`` instance describing the block device
        :rtype: digitalocean.Volume.Volume
        """
        with start_action(
                action_type=six.text_type("flocker:node:agents:do:get_volume"),
                blockdevice_id=blockdevice_id) as a:
            vol = self._manager.get_volume(blockdevice_id)
            a.add_success_fields(
                volume={
                    'name': vol.name,
                    'region': vol.region["slug"],
                    'description': vol.description,
                    'attached_to': vol.droplet_ids
                })
            return vol

    @classmethod
    def _unmangle_dataset(cls, vol_name):
        """Unmangles the flocker dataset from a digital ocean volume name

        :param vol_name: The name of the digitalocean volume
        :return: The dataset UUID encoded therein or None, if not a flocker
                 volume
        :rtype: UUID
        """
        if vol_name and vol_name.startswith(cls._PREFIX):
            return UUID(vol_name[len(cls._PREFIX):])
        return None

    @classmethod
    def _mangle_dataset(cls, dataset_id):
        """Mangles a flocker dataset UUID into a digital ocean volume name.

        :param dataset_id: The UUID of the dataset
        :return: The volume name to use for the digitalocean volume
        """
        return cls._PREFIX + dataset_id.hex

    @staticmethod
    def _to_block_device_volume(do_volume):
        """Turns a digitalocean volume description into a flocker one

        :param do_volume: The digital ocean volume
        :type do_volume: digitalocean.Volume.Volume
        :return: The corresponding BlockDeviceVolume
        :rtype: BlockDeviceVolume
        """
        size = int(GiB(do_volume.size_gigabytes).to_Byte().value)
        attached = None
        if do_volume.droplet_ids:
            attached = six.text_type(do_volume.droplet_ids[0])
        dataset = DigitalOceanDeviceAPI._unmangle_dataset(do_volume.name)

        return BlockDeviceVolume(blockdevice_id=six.text_type(do_volume.id),
                                 size=size,
                                 attached_to=attached,
                                 dataset_id=dataset)

    def _categorize_do_volume(self, result_dict, vol):
        """ Reduce function to categorise whether a volume is usable.
        :param result_dict: A dictionary with three keys: ignored,
                            wrong_cluster, and okay
        :type result_dict: dict[str, list[digitalocean.Volume.Volume]]
        :param vol: A digitalocean volume
        :type vol: digitalocean.Volume.Volume
        :return: The result_dict with vol sorted into the correct slot
        :rtype: dict[str, list[digitalocean.Volume.Volume]]
        """
        if not six.text_type(vol.name).startswith(self._PREFIX):
            result_dict["ignored"].append(vol)
        elif six.text_type(vol.description) != self.volume_description:
            result_dict["wrong_cluster"].append(vol)
        else:
            result_dict["okay"].append(vol)
        return result_dict

    def _await_action_id(self, action_id):
        """Waits for an operation (specified by it's id) to complete

        :param action_id: The identifier of the action
        :type action_id: int
        :return: Whether the action was successful
        :rtype: bool
        """
        action = self.Action.get_object(self._manager.token, action_id)
        return self._await_action(action)

    def _await_action(self, action):
        """Waits for an operation to complete

        :param action: An action object to operate on
        :type action: ``digitalocean.Action.Action``
        :return: Whether the action was successful
        :rtype: bool
        """
        if action and action.status == 'completed':
            return True
        elif not action:
            return False
        with start_action(
                action_type=six.text_type('flocker:node:agents:do:await'),
                do_action_type=action.type,
                do_action_id=action.id) as ac:
            i = self._iterations_until(
                lambda x: not x or x.status != 'in-progress',
                lambda x: x.load_directly(), (action, ))

            if action.status == 'completed':
                ac.add_success_fields(iterations=i,
                                      do_action_status='completed')
            else:
                Message.log(message_type=six.text_type(
                    'flocker:node:agents:do:await:err'),
                            log_level=six.text_type('ERROR'),
                            message=six.text_type('Wait unsuccessful'),
                            iterations=i,
                            do_action_status=action.status)
                if action.status == 'in-progress':
                    raise DOException('Wait timeout')
                else:
                    raise DOException(
                        six.text_type('Action failed ({r})').format(
                            r=action.status))

        return action and action.status == 'completed'

    def _iterations_until(self, completed, update_state, argument):
        """ Poll for a state change to complete callable-s

        :param completed: A callable accepting argument, returning true if the
        state change has successfully completed.
        :param update_state: The action to execute in order to poll for a state
        change
        :param argument: The arguments on which to execute both the check and
        the action. Probably a tuple.
        :return: The number of iterations taken
        :rtype: int
        """

        if completed(*argument):
            return 0
        s = scheduler(time.time, time.sleep)
        i = 0
        started_at = time.time()
        while not completed(*argument) and not self._has_timed_out(started_at):
            delta = max(
                0, min(self._poll, self._timeout - (time.time() - started_at)))
            s.enter(delta, 0, update_state, argument)
            s.run()
            i += 1
        return i

    def _has_timed_out(self, from_time):
        """ Compare the current time to from_time and check for timeout.

        :param from_time: The time when the operation was started
        :return: Whether a timeout has occurred
        :rtype: bool
        """
        return time.time() - from_time >= self._timeout

    def list_volumes(self):
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:list_volumes")) as a:
            res = reduce(
                self._categorize_do_volume, self._manager.get_all_volumes(),
                dict(wrong_cluster=list(), ignored=list(), okay=list()))

            if res["ignored"]:
                ty = six.text_type(
                    "flocker:node:agents:do:list_volumes:ignored")
                msg = six.text_type("Ignored {num} unrelated volumes").format(
                    num=len(res["ignored"]))
                Message.log(message_type=ty,
                            log_level=six.text_type("INFO"),
                            message=msg,
                            ignored_volumes=res["ignored"])

            if res["wrong_cluster"]:
                ty = six.text_type("flocker:node:agents:do") \
                     + six.text_type(":list_volumes:suspicious_disk")
                msg = six.text_type("Volume follows naming convention but") \
                    + six.text_type(" is not owned by our cluster.")
                for volume in res["wrong_cluster"]:
                    Message.log(message_type=ty,
                                log_level=six.text_type("ERROR"),
                                message=msg,
                                volume=volume.name,
                                description=volume.description)

            # list() so the result survives the logging pass below (map is lazy on Python 3)
            volumes = list(map(self._to_block_device_volume, res["okay"]))
            a.add_success_fields(cluster_volumes=list(
                {
                    'blockdevice_id': v.blockdevice_id,
                    'size': v.size,
                    'attached_to': v.attached_to,
                    'dataset_id': six.text_type(v.dataset_id),
                } for v in volumes))
            return volumes

    def create_volume(self, dataset_id, size):
        gib = Byte(size).to_GiB()
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:create_volume"),
                          dataset_id=six.text_type(dataset_id),
                          size=size) as a:
            vol = self.Volume(token=self._manager.token)
            vol.name = self._mangle_dataset(dataset_id)
            vol.size_gigabytes = int(gib.value)
            vol.region = self.metadata.region
            vol.description = self.volume_description
            vol.create()
            a.add_success_fields(volume={
                'blockdevice_id': vol.id,
                'region': vol.region
            })
            return self._to_block_device_volume(vol)

    def destroy_volume(self, blockdevice_id):
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:destroy_volume"),
                          blockdevice_id=blockdevice_id):
            try:
                vol = self._get_volume(blockdevice_id)
                if vol.droplet_ids:
                    # need to detach prior to deletion
                    ty = six.text_type('flocker:node:agents:do') + \
                         six.text_type(':destroy:detach_needed')
                    Message.log(message_type=ty,
                                log_level=six.text_type('INFO'),
                                message=six.text_type(
                                    'Volume needs to be detached first'),
                                volume=vol.id,
                                attached_to=vol.droplet_ids[0])
                    r = vol.detach(vol.droplet_ids[0], vol.region['slug'])
                    self._await_action_id(r['action']['id'])

                vol.destroy()
            except NotFoundError as _:
                raise UnknownVolume(blockdevice_id)

    def attach_volume(self, blockdevice_id, attach_to):
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:attach_volume"),
                          blockdevice_id=blockdevice_id,
                          droplet_id=attach_to):
            try:
                vol = self._get_volume(blockdevice_id)
                if vol.droplet_ids:
                    raise AlreadyAttachedVolume(blockdevice_id)
                r = vol.attach(attach_to, vol.region["slug"])
                if self._await_action_id(r['action']['id']):
                    vol.droplet_ids = [attach_to]
                return self._to_block_device_volume(vol)
            except NotFoundError as _:
                raise UnknownVolume(blockdevice_id)

    def detach_volume(self, blockdevice_id):
        with start_action(action_type=six.text_type(
                "flocker:node:agents:do:detach_volume"),
                          blockdevice_id=blockdevice_id) as a:
            try:
                vol = self._get_volume(blockdevice_id)
                if not vol.droplet_ids:
                    raise UnattachedVolume(blockdevice_id)
                detach_from = vol.droplet_ids[0]
                region = vol.region["slug"]
                r = vol.detach(detach_from, region)

                if self._await_action_id(r['action']['id']):
                    vol.droplet_ids = None
                a.add_success_fields(detached_from={
                    'droplet_id': detach_from,
                    'region': region
                })
                return self._to_block_device_volume(vol)
            except NotFoundError as _:
                raise UnknownVolume(blockdevice_id)

    def get_device_path(self, blockdevice_id):
        try:
            vol = self._get_volume(blockdevice_id)
            path = FilePath(
                six.text_type("/dev/disk/by-id/scsi-0DO_Volume_{name}").format(
                    name=vol.name))

            # Even if we are not attached, the agent needs to know the
            # expected path for the convergence algorithm
            # FIXME: The functional tests seem to indicate otherwise
            if not vol.droplet_ids:
                # return path
                raise UnattachedVolume(blockdevice_id)

            # But if we are attached, we might need to resolve the symlink
            # noinspection PyBroadException
            try:
                return path.realpath()
            except Exception as _:
                return path

        except NotFoundError as _:
            raise UnknownVolume(blockdevice_id)

    def list_live_nodes(self):
        return map(
            lambda x: six.text_type(x.id),
            filter(lambda x: x.status == 'active',
                   self._manager.get_all_droplets()))

    def start_node(self, compute_instance_id):
        droplet = self._manager.get_droplet(compute_instance_id)
        if droplet.status != 'active':
            action = droplet.power_on(return_dict=False)
            self._await_action(action)
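For orientation, a rough sketch of how the block device API above might be instantiated and queried. The cluster ID and token are placeholders, and the flocker/eliot machinery the class relies on must already be importable; this is not a runnable deployment recipe.

from uuid import uuid4

# Hypothetical wiring; a real agent would read these values from its configuration.
api = DigitalOceanDeviceAPI(cluster_id=uuid4(), token="<do-api-token>")

print(api.allocation_unit())        # 1 GiB in bytes, the allocation unit used above
for volume in api.list_volumes():   # only volumes carrying this cluster's description
    print(volume.blockdevice_id, volume.size, volume.attached_to)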