Esempio n. 1
0
class Container(Base):
    """Container workload definition plus its resource-unit accounting."""

    id = fields.Integer()
    flist = fields.String(default="")
    hub_url = fields.String(default="")
    storage_url = fields.String(default="")
    environment = fields.Typed(dict)
    secret_environment = fields.Typed(dict)
    entrypoint = fields.String(default="")
    interactive = fields.Boolean(default=True)
    volumes = fields.List(fields.Object(ContainerMount))
    network_connection = fields.List(fields.Object(ContainerNetworkConnection))
    stats = fields.List(fields.Object(ContainerStats))
    farmer_tid = fields.Integer()
    logs = fields.List(fields.Object(ContainerLogs))
    capacity = fields.Object(ContainerCapacity)
    info = fields.Object(ReservationInfo)

    def resource_units(self):
        """Translate this container's capacity into a ResourceUnitAmount."""
        units = ResourceUnitAmount()
        units.cru = self.capacity.cpu
        # divide by 1024 (presumably MiB -> GiB — confirm units upstream),
        # rounded to 4 decimal places
        units.mru = round(self.capacity.memory / 1024 * 10000) / 10000
        storage = round(self.capacity.disk_size / 1024 * 10000) / 10000
        # we offer the 50 first GB of storage for container root filesystem
        storage = max(0, storage - 50)
        if self.capacity.disk_type == DiskType.HDD:
            units.hru += storage
        elif self.capacity.disk_type == DiskType.SSD:
            units.sru += storage
        return units
Esempio n. 2
0
class User(Base):
    """User model with computed full/unique names and Greeter helpers."""

    emails = fields.List(fields.String())
    permissions = fields.List(fields.Object(Permission))
    custom_config = fields.Typed(dict)
    type = fields.Enum(UserType)
    password = fields.Secret()

    first_name = fields.String(default="")
    last_name = fields.String(default="")

    def get_full_name(self):
        """Join first and last name with a space (last name only if set)."""
        parts = [self.first_name]
        if self.last_name:
            parts.append(self.last_name)
        return " ".join(parts)

    def get_unique_name(self):
        """Full name with spaces stripped, suffixed with '.user'."""
        compact = self.full_name.replace(" ", "")
        return f"{compact}.user"

    full_name = fields.String(compute=get_full_name)
    unique_name = fields.String(compute=get_unique_name)

    def get_my_greeter(self):
        """Build a Greeter bound to this user's full name."""
        return Greeter(self.full_name)

    my_greeter = fields.Typed(Greeter, stored=False, compute=get_my_greeter)
    ahmed_greeter = fields.Typed(Greeter, stored=False, default=Greeter("ahmed"))
Esempio n. 3
0
class HardwareProof(Base):
    """Snapshot of a node's hardware state: hashes plus the raw reports they were derived from."""

    created = fields.DateTime()
    hardware_hash = fields.String(default="")
    disk_hash = fields.String(default="")
    hardware = fields.Typed(dict)  # raw hardware report — schema not visible here; confirm against producer
    disks = fields.Typed(dict)  # raw disks report — schema not visible here; confirm against producer
    hypervisor = fields.List(fields.String())
Esempio n. 4
0
class ChartConfig(Base):
    """Configuration for a deployed chart: domain, certificate resolver, resource limits and extras."""

    cert_resolver = fields.String(default="le")
    domain = fields.String(default=None)
    domain_type = fields.String()
    # Callable defaults: a literal {} here is evaluated once at class
    # definition time and becomes ONE dict shared by every instance (same
    # pitfall as a mutable default argument). The framework supports
    # callable defaults (cf. `default=dict` on Wallet.origin elsewhere in
    # this codebase), giving each instance its own fresh dict.
    resources_limits = fields.Typed(dict, default=dict)
    backup = fields.String(default="vdc")
    ip_version = fields.String(default="IPv6")
    extra_config = fields.Typed(dict, default=dict)
Esempio n. 5
0
class User(Base):
    """A poll participant: wallet addresses, token weight and recorded vote payloads."""

    user_code = fields.String(default="")
    poll_name = fields.String(default="")
    wallets_addresses = fields.List(fields.String())
    transaction_hashes = fields.List(fields.String())
    tokens = fields.Float(default=0.0)
    # Callable defaults: a literal {} is evaluated once at class definition
    # time and shared as ONE dict across all User instances; `default=dict`
    # gives each instance a fresh dict (the framework supports callable
    # defaults, cf. `default=datetime.utcnow` elsewhere in this codebase).
    vote_data = fields.Typed(dict, default=dict)
    extra_data = fields.Typed(dict, default=dict)
    vote_data_weighted = fields.Typed(dict, default=dict)
    has_voted = fields.Boolean(default=False)
    manifesto_version = fields.String(default="2.0.0")
Esempio n. 6
0
class TfgridSolution1(Base):
    """A named deployed solution linked to its reservation on an explorer."""

    id = fields.Integer()
    name = fields.String(default="")
    solution_type = fields.Enum(SolutionType)
    rid = fields.Integer()  # presumably the reservation id — confirm against callers
    form_info = fields.Typed(dict)  # free-form info dict — schema not visible here
    explorer = fields.String(default="")
Esempio n. 7
0
class Container(Base):
    """Container workload definition (flist, environment, volumes, capacity, reservation info)."""

    id = fields.Integer()
    flist = fields.String(default="")
    hub_url = fields.String(default="")
    storage_url = fields.String(default="")
    environment = fields.Typed(dict)
    secret_environment = fields.Typed(dict)  # environment entries kept separate from the plain ones
    entrypoint = fields.String(default="")
    interactive = fields.Boolean(default=True)
    volumes = fields.List(fields.Object(ContainerMount))
    network_connection = fields.List(fields.Object(ContainerNetworkConnection))
    stats_aggregator = fields.List(fields.Object(Statsaggregator))
    farmer_tid = fields.Integer()
    logs = fields.List(fields.Object(ContainerLogs))
    capacity = fields.Object(ContainerCapacity)
    info = fields.Object(ReservationInfo)
Esempio n. 8
0
class VMachine(VDCWorkloadBase):
    """VDC virtual-machine workload with optional attached public IP."""

    name = fields.String()
    public_ip = fields.Object(PublicIP)
    size = fields.Integer()
    resources = fields.Typed(dict)
    ip_address = fields.String(default="")

    @classmethod
    def from_workload(cls, workload):
        """Build a VMachine from an explorer workload object."""
        vm = cls()
        vm.wid = workload.id
        raw_meta = j.sals.reservation_chatflow.reservation_chatflow.decrypt_reservation_metadata(workload.info.metadata)
        meta = j.data.serializers.json.loads(raw_meta)
        vm.name = meta["form_info"]["name"]
        vm.pool_id = workload.info.pool_id
        vm.node_id = workload.info.node_id
        vm.size = workload.size
        vm.resources = VMSIZES.get(workload.size)
        vm.ip_address = workload.ipaddress
        if workload.public_ip:
            # resolve the public-ip workload and keep only the bare address
            vm.public_ip.wid = workload.public_ip
            ip_workload = get_zos().workloads.get(workload.public_ip)
            vm.public_ip.address = str(netaddr.IPNetwork(ip_workload.ipaddress).ip)

        return vm
Esempio n. 9
0
class Wallet(Base):
    """Wallet record: identifier, origin data, addresses and contact/validation fields."""

    ID = fields.Integer(required=True)
    origin = fields.Typed(dict, default=dict)  # callable default: each instance gets its own fresh dict
    addresses = fields.Factory(Address)
    key = fields.Bytes()
    email = fields.Email()
    url = fields.URL(required=False, allow_empty=True)
    data = fields.Json(allow_empty=False)
Esempio n. 10
0
class TfgridSolutionsPayment1(Base):
    """Payment record for a solution reservation: escrow details, amounts and per-farmer split."""

    id = fields.Integer()
    rid = fields.Integer()  # presumably the reservation id — confirm against callers
    explorer = fields.String(default="")
    currency = fields.String(default="")
    escrow_address = fields.String(default="")
    escrow_asset = fields.String(default="")
    total_amount = fields.String(default="")
    transaction_fees = fields.String(default="")
    payment_source = fields.String(default="")
    # Callable default: a literal {} is evaluated once at class definition
    # time and shared as ONE dict across all instances; `default=dict` gives
    # each instance a fresh dict — consistent with the callable
    # `default=datetime.utcnow` on the field below.
    farmer_payments = fields.Typed(dict, default=dict)
    time = fields.DateTime(default=datetime.utcnow)
Esempio n. 11
0
class User(Base):
    """User record with computed full/unique names."""

    id = fields.Integer()
    first_name = fields.String(default="")
    last_name = fields.String(default="")
    emails = fields.List(fields.String())
    permissions = fields.List(fields.Object(Permission))
    custom_config = fields.Typed(dict)
    rating = fields.Float()
    time = fields.DateTime(default=datetime.datetime.now)

    def get_full_name(self):
        """Join first and last name with a space (last name only if set)."""
        parts = [self.first_name]
        if self.last_name:
            parts.append(self.last_name)
        return " ".join(parts)

    def get_unique_name(self):
        """Full name with spaces stripped, suffixed with '.user'."""
        compact = self.full_name.replace(" ", "")
        return f"{compact}.user"

    full_name = fields.String(compute=get_full_name)
    unique_name = fields.String(compute=get_unique_name)
Esempio n. 12
0
class SSHClient(Client):
    """
    SSHClient has the following properties:
    sshkey (str): name of the sshkey client instance to use within that client
    host (str): host ip (default: 127.0.0.1)
    user (str): user to connect as (default: root)
    port (int): the port to use (default: 22)
    forward_agent (bool): forward agent or not (default True)
    connect_timeout (int): timeout (default 10 seconds)

    """

    sshkey = fields.String(required=True)

    host = fields.String(default="127.0.0.1", required=True)
    user = fields.String(default="root", required=True)
    port = fields.Integer(default=22, required=True)
    forward_agent = fields.Boolean(default=True)
    connect_timeout = fields.Integer(default=10)
    # Callable default so every instance gets its OWN dict: the `sshclient`
    # property mutates this dict in place (sets "key_filename"), and a
    # literal {} default would be a single class-level dict whose mutations
    # (e.g. another instance's key path) leak across all SSHClient instances.
    connection_kwargs = fields.Typed(dict, default=dict)

    # gateway = ?  FIXME: should help with proxyjumps. http://docs.fabfile.org/en/2.4/concepts/networking.html#ssh-gateways

    # Whether to send environment variables "inline" as prefixes in front of
    # command strings (export VARNAME=value && mycommand here), instead of
    # trying to submit them through the SSH protocol itself (which is the
    # default behavior). This is necessary if the remote server has a
    # restricted AcceptEnv setting (which is the common default).
    inline_ssh_env = fields.Boolean(default=True)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__client = None  # cached RemoteExecutor, built lazily by `sshclient`

    @property
    def _sshkey(self):
        """Get the sshkey client instance named by `self.sshkey`
        e.g
            JS-NG> localconnection = j.clients.sshclient.new("localconnection")
            JS-NG> localconnection.sshkey = "xmonader"
            JS-NG> localconnection._sshkey  -> SSHKeyClient(_Base__instance_name='xmonader', _Base__parent=None, ...
        Returns:
            Obj: It returns object of SSHKeyClient
        """
        return j.clients.sshkey.get(self.sshkey)

    @property
    def sshclient(self):
        """Lazily build (and cache) the RemoteExecutor for this configuration.

        Raises whatever `validate()` raises when required fields are missing.
        """
        self.validate()
        if not self.__client:
            self.connection_kwargs["key_filename"] = self._sshkey.private_key_path
            connection_kwargs = dict(
                host=self.host,
                user=self.user,
                port=self.port,
                forward_agent=self.forward_agent,
                connect_timeout=self.connect_timeout,
                connect_kwargs=self.connection_kwargs,
            )
            if self._sshkey.passphrase:
                connection_kwargs["connect_kwargs"]["passphrase"] = self._sshkey.passphrase

            self.__client = j.core.executors.RemoteExecutor(**connection_kwargs)

        return self.__client

    def reset_connection(self):
        """Drop the cached connection so the next `sshclient` access reconnects
        e.g
            localconnection = j.clients.sshclient.new("localconnection")
            localconnection.reset_connection()

        """
        self.__client = None
Esempio n. 13
0
class PackageManager(Base):
    """Manages threebot packages: listing, adding, installing, reloading and deleting them."""

    # Callable default: passing the *bound method* DEFAULT_PACKAGES.copy makes
    # the framework build a fresh copy per instance. The original
    # `default=DEFAULT_PACKAGES.copy()` called copy() ONCE at class definition
    # time, so every PackageManager instance shared (and mutated) the same
    # dict. (The framework supports callable defaults, cf. `default=dict` and
    # `default=datetime.utcnow` elsewhere in this codebase.)
    packages = fields.Typed(dict, default=DEFAULT_PACKAGES.copy)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._threebot = None  # resolved lazily by the `threebot` property

    @property
    def threebot(self):
        """Lazily resolved threebot server instance."""
        if self._threebot is None:
            self._threebot = j.servers.threebot.get()
        return self._threebot

    def get(self, package_name):
        """Build a Package object for a registered package.

        Args:
            package_name (str): name of the package

        Returns:
            Package or None: None when the name is not registered
        """
        if package_name not in self.packages:
            return None
        info = self.packages[package_name]
        return Package(
            path=info["path"],
            default_domain=self.threebot.domain,
            default_email=self.threebot.email,
            default_acme_server_type=self.threebot.acme_server_type,
            default_acme_server_url=self.threebot.acme_server_url,
            giturl=info["giturl"],
            kwargs=info.get("kwargs", {}),
        )

    def get_packages(self):
        """Return info dicts for installed packages plus uninstalled sdk packages.

        Returns:
            list[dict]: one dict per package (name, path, giturl, flags)
        """
        all_packages = []

        # Add installed packages including outer packages
        for pkg in self.packages:
            package = self.get(pkg)
            if package:
                if j.sals.fs.exists(package.path):
                    all_packages.append(
                        {
                            "name": pkg,
                            "path": package.path,
                            "giturl": package.giturl,
                            "system_package": pkg in DEFAULT_PACKAGES.keys(),
                            "installed": True,
                            "frontend": package.config.get("frontend", False),
                            "chatflows": bool(package.chats_dir),
                        }
                    )
                else:
                    j.logger.error(f"path {package.path} for {pkg} doesn't exist anymore")
            else:
                # f-prefix was missing in the original, logging the literal
                # "{pkg}" instead of the package name
                j.logger.error(f"pkg {pkg} is in self.packages but it's None")

        # Add uninstalled sdk packages under j.packages
        for path in set(pkgnamespace.__path__):
            for pkg in os.listdir(path):
                if pkg not in self.packages:
                    all_packages.append(
                        {
                            "name": pkg,
                            "path": j.sals.fs.dirname(getattr(j.packages, pkg).__file__),
                            "giturl": "",
                            "system_package": pkg in DEFAULT_PACKAGES.keys(),
                            "installed": False,
                        }
                    )

        return all_packages

    def list_all(self):
        """Return the names of all registered packages."""
        return list(self.packages.keys())

    def add(self, path: str = None, giturl: str = None, **kwargs):
        """Register and install a package from a local path or a git URL.

        Exactly one of `path`/`giturl` must be given. A giturl without a
        branch part gets the branch guessed (master, main, development).

        Args:
            path (str, optional): local filesystem path of the package
            giturl (str, optional): git URL, optionally .../tree/<branch>
            **kwargs: forwarded to the package's install method

        Raises:
            j.exceptions.Value: bad argument combination, unguessable branch,
                invalid URL, or missing package.toml in the cloned repo

        Returns:
            dict: {package_name: package_info}
        """
        # first check if public repo
        # TODO: Check if package already exists
        if not any([path, giturl]) or all([path, giturl]):
            raise j.exceptions.Value("either path or giturl is required")
        pkg_name = ""
        if giturl:
            url = urlparse(giturl)
            url_parts = url.path.lstrip("/").split("/")
            if len(url_parts) == 2:
                pkg_name = url_parts[1].strip("/")
                j.logger.debug(
                    f"user didn't pass a URL containing branch {giturl}, try to guess (master, main, development) in order"
                )
                if j.tools.http.get(f"{giturl}/tree/master").status_code == 200:
                    url_parts.extend(["tree", "master"])
                elif j.tools.http.get(f"{giturl}/tree/main").status_code == 200:
                    url_parts.extend(["tree", "main"])
                elif j.tools.http.get(f"{giturl}/tree/development").status_code == 200:
                    url_parts.extend(["tree", "development"])
                else:
                    raise j.exceptions.Value(f"couldn't guess the branch for {giturl}")
            else:
                pkg_name = url_parts[-1].strip("/")

            if len(url_parts) < 4:
                raise j.exceptions.Value(f"invalid git URL {giturl}")

            org, repo, _, branch = url_parts[:4]
            repo_dir = f"{org}_{repo}_{pkg_name}_{branch}"
            repo_path = j.sals.fs.join_paths(DOWNLOADED_PACKAGES_PATH, repo_dir)
            repo_url = f"{url.scheme}://{url.hostname}/{org}/{repo}"

            # delete repo dir if exists
            j.sals.fs.rmtree(repo_path)

            j.tools.git.clone_repo(url=repo_url, dest=repo_path, branch_or_tag=branch)
            toml_paths = list(
                j.sals.fs.walk(repo_path, "*", filter_fun=lambda x: str(x).endswith(f"{pkg_name}/package.toml"))
            )
            if not toml_paths:
                raise j.exceptions.Value(f"couldn't find {pkg_name}/package.toml in {repo_path}")
            # the package root is the directory holding package.toml
            path = j.sals.fs.parent(toml_paths[0])

        package = Package(
            path=path,
            default_domain=self.threebot.domain,
            default_email=self.threebot.email,
            giturl=giturl,
            kwargs=kwargs,
        )

        # TODO: adding under the same name if same path and same giturl should be fine, no?
        # if package.name in self.packages:
        #     raise j.exceptions.Value(f"Package with name {package.name} already exists")

        # execute package install method
        package.install(**kwargs)

        # install package if threebot is started
        if self.threebot.started:
            self.install(package)
            self.threebot.nginx.reload()
        self.packages[package.name] = {
            "name": package.name,
            "path": package.path,
            "giturl": package.giturl,
            "kwargs": package.kwargs,
        }

        self.save()

        # Return updated package info
        return {package.name: self.packages[package.name]}

    def delete(self, package_name):
        """Uninstall a non-default package and remove it from the registry.

        Args:
            package_name (str): name of the package

        Raises:
            j.exceptions.Value: if the package is a default (system) package
            j.exceptions.NotFound: if the package is not registered
        """
        if package_name in DEFAULT_PACKAGES:
            raise j.exceptions.Value("cannot delete default packages")
        package = self.get(package_name)
        if not package:
            raise j.exceptions.NotFound(f"{package_name} package not found")

        # remove bottle servers
        rack_servers = list(self.threebot.rack._servers)
        for bottle_server in rack_servers:
            if bottle_server.startswith(f"{package_name}_"):
                self.threebot.rack.remove(bottle_server)

        # stop background services
        if package.services_dir:
            for service in package.services:
                self.threebot.services.stop_service(service["name"])

        if self.threebot.started:
            # unregister gedis actors
            gedis_actors = list(self.threebot.gedis._loaded_actors.keys())
            for actor in gedis_actors:
                if actor.startswith(f"{package_name}_"):
                    self.threebot.gedis._system_actor.unregister_actor(actor)

            # unload chats (best-effort: failure to unload must not block deletion)
            try:
                if package.chats_dir:
                    self.threebot.chatbot.unload(package.chats_dir)
            except Exception as e:
                j.logger.warning(
                    f"Couldn't unload the chats of package {package_name}, this is the exception {str(e)}"
                )

            # reload nginx
            self.threebot.nginx.reload()

        # execute package uninstall method
        package.uninstall()

        self.packages.pop(package_name)
        self.save()

    def install(self, package):
        """install and apply package configrations

        Args:
            package ([package object]): get package object using [self.get(package_name)]

        Raises:
            j.exceptions.NotFound: if a static dir or bottle server file is missing
        """
        sys.path.append(package.path + "/../")  # TODO to be changed
        package.preinstall()
        for static_dir in package.static_dirs:
            path = package.resolve_staticdir_location(static_dir)
            if not j.sals.fs.exists(path):
                raise j.exceptions.NotFound(f"Cannot find static dir {path}")

        # add bottle servers
        for bottle_server in package.bottle_servers:
            path = j.sals.fs.join_paths(package.path, bottle_server["file_path"])
            if not j.sals.fs.exists(path):
                raise j.exceptions.NotFound(f"Cannot find bottle server path {path}")

            bottle_app = package.get_bottle_server(path, bottle_server["host"], bottle_server["port"])
            self.threebot.rack.add(f"{package.name}_{bottle_server['name']}", bottle_app)

        # register gedis actors
        if package.actors_dir:
            for actor in package.actors:
                self.threebot.gedis._system_actor.register_actor(actor["name"], actor["path"], force_reload=True)

        # add chatflows actors
        if package.chats_dir:
            self.threebot.chatbot.load(package.chats_dir)

        # start background services
        if package.services_dir:
            for service in package.services:
                self.threebot.services.add_service(service["path"])

        # start servers
        self.threebot.rack.start()

        # apply nginx configuration
        package.nginx_config.apply()

        # execute package start method
        package.start()
        self.threebot.gedis_http.client.reload()
        self.threebot.nginx.reload()

    def reload(self, package_name):
        """Stop a package's services and re-run its installation.

        Args:
            package_name (str): name of the package

        Raises:
            j.exceptions.NotFound: if the package is not registered
            j.exceptions.Runtime: if the threebot server is not started

        Returns:
            dict: {package_name: package_info}
        """
        if not self.threebot.started:
            raise j.exceptions.Runtime("Can't reload package. Threebot server is not started")
        package = self.get(package_name)
        if not package:
            raise j.exceptions.NotFound(f"{package_name} package not found")
        if package.services_dir:
            for service in package.services:
                self.threebot.services.stop_service(service["name"])
        self.install(package)
        self.threebot.nginx.reload()
        self.save()

        # Return updated package info
        return {package.name: self.packages[package.name]}

    def _install_all(self):
        """Install and apply all the packages configurations
        This method shall not be called directly from the shell,
        it must be called only from the code on the running Gedis server
        """
        for package in self.list_all():
            if package in DEFAULT_PACKAGES:
                continue
            j.logger.info(f"Configuring package {package}")
            pkg = self.get(package)
            if not pkg:
                j.logger.error(f"can't get package {package}")
            elif pkg.path and j.sals.fs.exists(pkg.path):
                self.install(pkg)
            else:
                j.logger.error(
                    f"package {package} was installed before but {pkg.path} doesn't exist anymore."
                )

    def scan_packages_paths_in_dir(self, path):
        """Scans all packages in a path in any level and returns list of package paths

        Args:
            path (str): root path that has packages on some levels

        Returns:
            List[str]: list of all packages available under the path
        """
        filterfun = lambda x: str(x).endswith("package.toml")
        pkgtoml_paths = j.sals.fs.walk(path, filter_fun=filterfun)
        return [p.replace("/package.toml", "") for p in pkgtoml_paths]

    def scan_packages_in_dir(self, path):
        """Gets a dict from packages names to packages paths existing under a path that may have jumpscale packages at any level.

        Args:
            path (str): root path that has packages on some levels

        Returns:
            Dict[package_name, package_path]: dict of all packages available under the path
        """
        pkgname_to_path = {}
        for p in self.scan_packages_paths_in_dir(path):
            basename = j.sals.fs.basename(p).strip()
            if basename:
                pkgname_to_path[basename] = p

        return pkgname_to_path
Esempio n. 14
0
class ResticRepo(Base):
    """Wrapper around the restic binary for one repository: backup, restore, snapshots and cron automation."""

    repo = fields.String(required=True)
    password = fields.Secret(required=True)
    # Callable default so every instance gets its own dict; a literal {} is
    # evaluated once at class definition time and shared across all
    # ResticRepo instances (the framework supports callable defaults,
    # cf. `default=dict` elsewhere in this codebase).
    extra_env = fields.Typed(dict, default=dict)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._check_install("restic")
        self._env = None  # lazily-built subprocess environment

    def _check_install(self, binary):
        """Raise NotFound if `binary` is not available on PATH."""
        if subprocess.call(["which", binary], stdout=subprocess.DEVNULL):
            raise NotFound(f"{binary} not installed")

    @property
    def env(self):
        """Environment for restic subprocesses: os.environ + repo/password + extra_env."""
        if not self._env:
            self.validate()
            self._env = os.environ.copy()
            self._env.update({"RESTIC_PASSWORD": self.password, "RESTIC_REPOSITORY": self.repo}, **self.extra_env)
        return self._env

    def _run_cmd(self, cmd, check=True):
        """Run a restic command; raise Runtime on non-zero exit when check is True."""
        proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.env)
        if check and proc.returncode:
            raise Runtime(f"Restic command failed with {proc.stderr.decode()}")
        return proc

    def init_repo(self):
        """Init restic repo with data specified in the instances
        """
        # `restic cat config` fails when the repo was never initialized
        proc = self._run_cmd(["restic", "cat", "config"], False)
        if proc.returncode > 0:
            self._run_cmd(["restic", "init"])

    def backup(self, path, tags=None):
        """Backup a path to the repo

        Args:
            path (str): local path to backup
            tags (list): list of tags to set to the backup
        """
        tags = tags or []
        cmd = ["restic", "backup", path]
        for tag in tags:
            cmd.extend(["--tag", tag])
        self._run_cmd(cmd)

    def restore(self, target_path, snapshot_id=None, latest=True, path=None, host=None):
        """restores a snapshot

        Args:
            target_path (str): path to restore to
            snapshot_id (str, optional): id of the snapshot. Defaults to None.
            latest (bool, optional): if True will use latest snapshot. Defaults to True.
            path (str, optional): Filter on the path when using latest. Defaults to None.
            host (str, optional): Filter on the hostname when using latest. Defaults to None.

        Raises:
            ValueError: if neither snapshot_id nor latest is given
        """
        cmd = ["restic", "--target", target_path, "restore"]
        if snapshot_id:
            cmd.append(snapshot_id)
            self._run_cmd(cmd)
        elif latest:
            args = ["latest"]
            if path:
                args.extend(["--path", path])
            if host:
                args.extend(["--host", host])
            self._run_cmd(cmd + args)
        else:
            raise ValueError("Please specify either `snapshot_id` or `latest` flag")

    def list_snapshots(self, tags=None, last=False, path=None):
        """List all snapshots in the repo

        Args:
            tags (list): list of tags to filter on
            last (bool): if True will get last snapshot only while respecting the other filters
            path (str): path to filter on

        Returns
            list : all snapshots as dicts
        """
        tags = tags or []
        cmd = ["restic", "snapshots", "--json"]
        for tag in tags:
            cmd.extend(["--tag", tag])

        if path:
            cmd.extend(["--path", path])
        if last:
            cmd.append("--last")
        proc = self._run_cmd(cmd)
        return json.loads(proc.stdout)

    def forget(self, keep_last=10, prune=True):
        """Deletes data in the repo

        Args:
            keep_last (str, optional): How many items to keep. Defaults to 10.
            prune (bool, optional): Whether to prune the data or not. Defaults to True.
        """
        cmd = ["restic", "forget", "--keep-last", str(keep_last)]
        if prune:
            cmd.append("--prune")
        self._run_cmd(cmd)

    def _get_script_path(self, path):
        """Path of the generated cron script for `path` (inside that directory)."""
        return os.path.join(path, f"{self.instance_name}_restic_cron")

    def _get_crons_jobs(self):
        """Return the current user's crontab content (empty string if none)."""
        proc = subprocess.run(["crontab", "-l"], stderr=subprocess.DEVNULL, stdout=subprocess.PIPE)
        return proc.stdout.decode()

    def auto_backup(self, path, keep_last=20):
        """Runs a cron job that backups the repo and prunes the last specified backups

        Args:
            path (str): local path to backup
            keep_last (int, optional): How many items to keep in every forget operation. Defaults to 20.

        Raises:
            Runtime: if installing the cron job fails
        """
        self._check_install("crontab")
        script_path = self._get_script_path(path)
        cronjobs = self._get_crons_jobs()
        if not self.auto_backup_running(path):  # Check if cron job already running
            cron_script = CRON_SCRIPT.format(repo=self.repo, password=self.password, path=path, keep_last=keep_last)
            with open(script_path, "w") as rfd:
                rfd.write(cron_script)

            # run daily at midnight, appended to whatever crontab already has
            cron_cmd = cronjobs + f"0 0 * * * bash {script_path} \n"
            proc = subprocess.Popen(["crontab", "-"], stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            proc_res = proc.communicate(input=cron_cmd.encode())
            if proc.returncode > 0:
                raise Runtime(f"Couldn't start cron job, failed with {proc_res[1]}")

    def auto_backup_running(self, path):
        """Checks if auto backup for the specified path is running or not

        Args:
            path (str): local path to backup in the cron job

        Returns:
            bool: Whether it is running or not
        """
        script_path = self._get_script_path(path)
        cronjobs = self._get_crons_jobs()
        return cronjobs.find(script_path) >= 0

    def disable_auto_backup(self, path):
        """Removes the cron job based on the path being backed up

        Args:
            path (str): local path to backup in the cron job

        Raises:
            Runtime: if rewriting the crontab fails
        """
        script_path = self._get_script_path(path)
        cronjobs = self._get_crons_jobs()
        # keep every cron line that does not reference our script
        other_crons = []
        for cronjob in cronjobs.splitlines():
            if script_path not in cronjob:
                other_crons.append(cronjob)
        proc = subprocess.Popen(["crontab", "-"], stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        cron_cmd = "\n".join(other_crons) + "\n"
        proc_res = proc.communicate(input=cron_cmd.encode())
        if proc.returncode > 0:
            raise Runtime(f"Couldn't remove cron job, failed with {proc_res[1]}")
Esempio n. 15
0
class GedisServer(Base):
    """RPC server speaking the redis wire protocol.

    Clients send `actor_name method_name [json_payload]` requests; the server
    dispatches to registered actor objects and replies with a JSON-encoded
    response dict (`success`, `result`, `error`, `error_type`, ...).
    """

    host = fields.String(default="127.0.0.1")  # listen address
    port = fields.Integer(default=16000)  # listen port
    enable_system_actor = fields.Boolean(default=True)  # register the "system" actor on start
    run_async = fields.Boolean(default=True)
    _actors = fields.Typed(dict)  # saved actor name -> actor module path

    def __init__(self):
        super().__init__()
        self._actors = self._actors or {}
        self._core_actor = CoreActor()
        self._system_actor = SystemActor()
        self._loaded_actors = {"core": self._core_actor}
        # BUG FIX: set in start(); stop() used to reference a never-assigned attribute
        self._server = None

    @property
    def actors(self):
        """Saved actors.

        Returns:
            dict -- mapping of actor name to actor path
        """
        return self._actors

    def actor_add(self, actor_name: str, actor_path: str):
        """Adds an actor to the server

        Arguments:
            actor_name {str} -- Actor name
            actor_path {str} -- Actor absolute path

        Raises:
            j.exceptions.Value: raises if actor name matches one of the reserved actor names
            j.exceptions.Value: raises if actor name is not a valid identifier
        """
        if actor_name in RESERVED_ACTOR_NAMES:
            raise j.exceptions.Value("Invalid actor name")

        if not actor_name.isidentifier():
            # BUG FIX: was an f-string with no placeholder; include the offending name
            raise j.exceptions.Value(f"Actor name '{actor_name}' should be a valid identifier")

        self._actors[actor_name] = actor_path

    def actor_delete(self, actor_name: str):
        """Removes an actor from the server

        Arguments:
            actor_name {str} -- Actor name
        """
        self._actors.pop(actor_name, None)

    def start(self):
        """Starts the server and blocks serving forever."""
        # local import: the file's top-of-file import block is not visible in this chunk
        from signal import SIGINT

        j.application.start("gedis")

        # handle signals
        # BUG FIX: original tuple registered SIGTERM twice and SIGKILL, which
        # cannot be caught or handled at all
        for signal_type in (SIGINT, SIGTERM):
            gevent.signal(signal_type, self.stop)

        # register system actor if enabled
        if self.enable_system_actor:
            self._register_actor("system", self._system_actor)

        self._core_actor.set_server(self)
        self._system_actor.set_server(self)

        # register saved actors
        for actor_name, actor_path in self._actors.items():
            self._system_actor.register_actor(actor_name, actor_path)

        # start the server
        server = StreamServer((self.host, self.port), self._on_connection)
        server.reuse_addr = True
        # BUG FIX: keep a reference so stop() can actually stop the server
        self._server = server
        server.serve_forever()

    def stop(self):
        """Stops the server (no-op if it was never started)."""
        j.logger.info("Shutting down ...")
        if self._server is not None:
            self._server.stop()

    def _register_actor(self, actor_name: str, actor_module: BaseActor):
        # make the actor object available for request dispatch
        self._loaded_actors[actor_name] = actor_module

    def _unregister_actor(self, actor_name: str):
        self._loaded_actors.pop(actor_name, None)

    def _execute(self, method, args, kwargs):
        """Call `method(*args, **kwargs)` and capture the result or error.

        Returns:
            dict: with `result` on success, or `error`/`error_type` on failure
        """
        response = {}
        try:
            response["result"] = method(*args, **kwargs)

        except TypeError as e:
            # bad/missing arguments from the client
            response["error"] = str(e)
            response["error_type"] = GedisErrorTypes.BAD_REQUEST.value

        # BUG FIX: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit
        except Exception:
            ttype, tvalue, tb = sys.exc_info()
            response["error"] = better_exceptions.format_exception(ttype, tvalue, tb)
            response["error_type"] = GedisErrorTypes.ACTOR_ERROR.value

        return response

    def _on_connection(self, socket, address):
        """Serve one client connection: parse requests, dispatch, encode replies.

        Args:
            socket: accepted client socket
            address: client address tuple
        """
        j.logger.info("New connection from {}", address)
        parser = DefaultParser(65536)
        connection = RedisConnectionAdapter(socket)
        try:
            encoder = ResponseEncoder(socket)
            parser.on_connect(connection)

            while True:
                # build the response skeleton before reading so it always exists
                response = dict(
                    success=True, result=None, error=None, error_type=None, is_async=False, task_id=None
                )
                try:
                    request = parser.read_response()

                    if len(request) < 2:
                        response["error"] = "invalid request"
                        response["error_type"] = GedisErrorTypes.BAD_REQUEST.value

                    else:
                        actor_name = request.pop(0).decode()
                        method_name = request.pop(0).decode()
                        actor_object = self._loaded_actors.get(actor_name)

                        if not actor_object:
                            response["error"] = "actor not found"
                            response["error_type"] = GedisErrorTypes.NOT_FOUND.value

                        elif not hasattr(actor_object, method_name):
                            response["error"] = "method not found"
                            response["error_type"] = GedisErrorTypes.NOT_FOUND.value

                        else:
                            j.logger.info(
                                "Executing method {} from actor {} to client {}", method_name, actor_name, address
                            )

                            # optional third element: JSON-encoded (args, kwargs)
                            if request:
                                args, kwargs = json.loads(request.pop(0), object_hook=deserialize)
                            else:
                                args, kwargs = (), {}

                            method = getattr(actor_object, method_name)
                            result = self._execute(method, args, kwargs)
                            response.update(result)

                except ConnectionError:
                    # BUG FIX: stop serving this client instead of looping on a
                    # dead socket (original fell through and tried to encode)
                    j.logger.info("Client {} closed the connection", address)
                    break

                except Exception as exception:
                    j.logger.exception("internal error", exception=exception)
                    response["error"] = "internal server error"
                    response["error_type"] = GedisErrorTypes.INTERNAL_SERVER_ERROR.value

                response["success"] = response["error"] is None
                encoder.encode(json.dumps(response, default=serialize))

        except BrokenPipeError:
            pass
        finally:
            # BUG FIX: original call sat after `while True` and was unreachable
            parser.on_disconnect()
# Example 16
class User(Base):
    """Minimal user model (variant with numeric id and rating)."""

    id = fields.Integer()  # numeric user identifier
    emails = fields.List(fields.String())  # user's email addresses
    permissions = fields.List(fields.Object(Permission))  # granted permissions
    custom_config = fields.Typed(dict)  # free-form per-user configuration
    rating = fields.Float()  # NOTE(review): rating semantics not shown here — confirm with callers
# Example 17
class ResticRepo(Base):
    """Wrapper around the ``restic`` backup tool.

    Supports repo init, backup, restore, snapshot listing, pruning, and
    crontab-driven automatic backups / backup watchdogs.
    Requires the ``restic`` binary to be installed (checked on construction).
    """

    repo = fields.String(required=True)  # restic repository location
    password = fields.Secret(required=True)  # repository encryption password
    extra_env = fields.Typed(dict, default={})  # extra env vars for restic (e.g. AWS credentials)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._check_install("restic")
        self._env = None

    def _check_install(self, binary):
        """Raise NotFound if `binary` is not available on PATH."""
        if subprocess.call(["which", binary], stdout=subprocess.DEVNULL):
            raise NotFound(f"{binary} not installed")

    @property
    def env(self):
        """Process environment for restic commands (repo, password, extra_env applied)."""
        self.validate()
        self._env = os.environ.copy()
        self._env.update({"RESTIC_PASSWORD": self.password, "RESTIC_REPOSITORY": self.repo}, **self.extra_env)
        return self._env

    def _run_cmd(self, cmd, check=True):
        """Run a restic command with the repo environment.

        Args:
            cmd (list of str): command and arguments.
            check (bool, optional): raise on non-zero exit. Defaults to True.

        Returns:
            subprocess.CompletedProcess: the finished process.

        Raises:
            Runtime: if `check` is True and the command failed.
        """
        proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=self.env)
        if check and proc.returncode:
            raise Runtime(f"Restic command failed with {proc.stderr.decode()}")
        return proc

    def init_repo(self):
        """Init restic repo with data specified in the instance (no-op if already initialized)."""
        # `restic cat config` succeeds only on an initialized repo
        proc = self._run_cmd(["restic", "cat", "config"], False)
        if proc.returncode > 0:
            self._run_cmd(["restic", "init"])

    def backup(self, path, tags=None, exclude=None):
        """Backup a path to the repo.

        Args:
            path (str or list of str): Local path/s to backup.
            tags (list): List of tags to set to the backup.
            exclude (list of str): This instructs restic to exclude files matching a given pattern/s.

        Raises:
            ValueError: if no path is given.
        """
        if not path:
            raise ValueError("Please specify path/s to backup")
        cmd = ["restic", "backup"]
        for tag in tags or []:
            cmd.extend(["--tag", tag])
        for pattern in exclude or []:
            cmd.append(f"--exclude={pattern}")
        if isinstance(path, list):
            cmd.extend(path)
        else:
            cmd.append(path)
        self._run_cmd(cmd)

    def restore(self, target_path, snapshot_id=None, latest=True, path=None, host=None, tags=None):
        """Restores a snapshot.

        Args:
            target_path (str): a path to restore to
            snapshot_id (str, optional): Id of the snapshot. Defaults to None.
            latest (bool, optional): if True will use latest snapshot. Defaults to True.
            path (str, optional): Filter on the path when using latest. Defaults to None.
            host (str, optional): Filter on the hostname when using latest. Defaults to None.
            tags (list, optional): List of tags to filter on.

        Raises:
            ValueError: if neither `snapshot_id` nor `latest` is given.
        """
        cmd = ["restic", "--target", target_path, "restore"]
        if snapshot_id:
            cmd.append(snapshot_id)
            self._run_cmd(cmd)
        elif latest:
            args = ["latest"]
            if path:
                args.extend(["--path", path])
            if host:
                args.extend(["--host", host])
            for tag in tags or []:
                args.extend(["--tag", tag])
            self._run_cmd(cmd + args)
        else:
            raise ValueError("Please specify either `snapshot_id` or `latest` flag")

    def list_snapshots(self, tags=None, last=False, path=None):
        """List all snapshots in the repo.

        Args:
            tags (list, optional): a list of tags to filter on. Defaults to None.
            last (bool): If True will get last snapshot only while respecting the other filters. Defaults to False.
            path (str): a path to filter on. Defaults to None.

        Returns
            list : all snapshots as dicts
        """
        cmd = ["restic", "snapshots", "--json"]
        for tag in tags or []:
            cmd.extend(["--tag", tag])
        if path:
            cmd.extend(["--path", path])
        if last:
            cmd.append("--last")
        proc = self._run_cmd(cmd)
        return json.loads(proc.stdout)

    def forget(self, keep_last=10, prune=True, snapshots=None, tags=None):
        """Remove snapshots and Optionally remove the data that was referenced by those snapshots.
        During a prune operation, the repository is locked and backups cannot be completed.

        Args:
            keep_last (str, optional): How many items to keep. Defaults to 10.
            prune (bool, optional): Whether to actually remove the data or not. Defaults to True.
            snapshots (list, optional): a list of specifics snapshot ids to forget. if given, the value of keep_last parm will be ignored. Defaults to None.
            tags (list, optional): if given, Only the snapshots which have specified tags are considered. Defaults to None.
        """
        cmd = ["restic", "forget"]
        for tag in tags or []:
            cmd.extend(["--tag", tag])
        if keep_last and not snapshots:  # will be ignored in case if passing snapshot id/s. this is a restic behaviour.
            cmd.extend(["--keep-last", str(keep_last)])
        if prune:
            cmd.append("--prune")
        if snapshots:
            cmd.extend(snapshots)
        self._run_cmd(cmd)

    def _get_script_path(self, path):
        """Path of this instance's generated cron script inside `path`."""
        return os.path.join(path, f"{self.instance_name}_restic_cron")

    def _get_crons_jobs(self):
        """Return the current user's crontab content ("" when no crontab exists)."""
        proc = subprocess.run(["crontab", "-l"], stderr=subprocess.DEVNULL, stdout=subprocess.PIPE)
        return proc.stdout.decode()

    def _install_crontab(self, content, error_msg):
        """Replace the current user's crontab with `content`.

        Extracted helper: this snippet was duplicated in auto_backup,
        disable_auto_backup and start_watch_backup.

        Args:
            content (str): full crontab content to install.
            error_msg (str): message prefix used when installation fails.

        Raises:
            Runtime: if the crontab command exits with a non-zero status.
        """
        proc = subprocess.Popen(["crontab", "-"], stdin=subprocess.PIPE, stderr=subprocess.PIPE)
        _, stderr = proc.communicate(input=content.encode())
        if proc.returncode > 0:
            # BUG FIX: decode stderr so the message is readable text, not a bytes repr
            raise Runtime(f"{error_msg}, failed with {stderr.decode()}")

    def auto_backup(self, path, keep_last=20):
        """Install a daily (midnight) cron job that backups the repo and prunes old snapshots.

        Args:
            path (str): Local path to backup.
            keep_last (int, optional): How many snapshots to keep in every forget operation. Defaults to 20.
        """
        self._check_install("crontab")
        script_path = self._get_script_path(path)
        cronjobs = self._get_crons_jobs()
        if not self.auto_backup_running(path):  # don't add the job twice
            cron_script = CRON_SCRIPT.format(repo=self.repo, password=self.password, path=path, keep_last=keep_last)
            with open(script_path, "w") as rfd:
                rfd.write(cron_script)

            cron_cmd = cronjobs + f"0 0 * * * bash {script_path} \n"
            self._install_crontab(cron_cmd, "Couldn't start cron job")

    def auto_backup_running(self, path):
        """Checks if auto backup for the specified path is scheduled or not

        Args:
            path (str): Local path to backup in the cron job.

        Returns:
            bool: Whether it is scheduled or not.
        """
        return self._get_script_path(path) in self._get_crons_jobs()

    def disable_auto_backup(self, path):
        """Removes the cron job for the given backed-up path.

        Args:
            path (str): Local path to backup in the cron job.
        """
        script_path = self._get_script_path(path)
        other_crons = [job for job in self._get_crons_jobs().splitlines() if script_path not in job]
        self._install_crontab("\n".join(other_crons) + "\n", "Couldn't remove cron job")

    def backup_watchdog_running(self, script_path) -> bool:
        """Checks whether the backup-watchdog cron job is scheduled.

        (Docstring fixed: this method only checks, it does not install the watchdog.)

        Args:
            script_path (str): the path used when the watchdog was started.
        Returns:
            bool: True if the backup watchdog is scheduled, otherwise False
        """
        return self._get_script_path(script_path) in self._get_crons_jobs()

    def start_watch_backup(self, path):
        """Install a cron job (every two days) that watches backups using the last snapshot time.

        (Docstring fixed: this method takes no `keep_last` parameter.)

        Args:
            path (str): Local path being backed up; the watchdog script is written there.
        """
        self._check_install("crontab")
        script_path = self._get_script_path(path)
        cronjobs = self._get_crons_jobs()
        if not self.backup_watchdog_running(path):  # don't add the job twice
            cron_script = WATCHDOG_SCRIPT.format(
                repo=self.repo,
                password=self.password,
                path=path,
                AWS_SECRET_ACCESS_KEY=self.extra_env.get("AWS_SECRET_ACCESS_KEY"),
                AWS_ACCESS_KEY_ID=self.extra_env.get("AWS_ACCESS_KEY_ID"),
                THREEBOT_NAME=os.environ.get("THREEBOT_NAME"),
                ESCALATION_MAIL=os.environ.get("ESCALATION_MAIL"),
            )

            with open(script_path, "w") as rfd:
                rfd.write(cron_script)

            cron_cmd = cronjobs + f"0 0 */2 * * bash {script_path} \n"
            self._install_crontab(cron_cmd, "Couldn't start cron job")
# Example 18
class PackageManager(Base):
    """Manages threebot packages: lookup, listing, adding (from a local path or
    a git URL), deletion, installation and reload."""

    # registered packages: name -> {"name", "path", "giturl"}; seeded with defaults
    packages = fields.Typed(dict, default=DEFAULT_PACKAGES.copy())

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._threebot = None  # lazily resolved in the `threebot` property

    @property
    def threebot(self):
        """Lazily-resolved threebot server instance."""
        if self._threebot is None:
            self._threebot = j.servers.threebot.get()
        return self._threebot

    def get(self, package_name):
        """Return a Package object for a registered package name.

        Returns None (implicitly) when the name is not registered.
        """
        if package_name in self.packages:
            package_path = self.packages[package_name]["path"]
            package_giturl = self.packages[package_name]["giturl"]
            return Package(
                path=package_path,
                default_domain=self.threebot.domain,
                default_email=self.threebot.email,
                giturl=package_giturl,
            )

    def get_packages(self):
        """Return info dicts for installed packages plus uninstalled sdk packages.

        Returns:
            list of dict: each with name/path/giturl/system_package/installed
            (and `frontend` for installed ones).
        """
        all_packages = []

        # Add installed packages including outer packages
        for pkg in self.packages:
            package = self.get(pkg)
            all_packages.append({
                "name":
                pkg,
                "path":
                package.path,
                "giturl":
                package.giturl,
                "system_package":
                pkg in DEFAULT_PACKAGES.keys(),
                "installed":
                True,
                "frontend":
                package.config.get("frontend", False),
            })

        # Add uninstalled sdk packages under j.packages
        for path in set(pkgnamespace.__path__):
            for pkg in os.listdir(path):
                if pkg not in self.packages:
                    all_packages.append({
                        "name":
                        pkg,
                        "path":
                        j.sals.fs.dirname(getattr(j.packages, pkg).__file__),
                        "giturl":
                        "",
                        "system_package":
                        pkg in DEFAULT_PACKAGES.keys(),
                        "installed":
                        False,
                    })

        return all_packages

    def list_all(self):
        """Return the names of all registered packages."""
        return list(self.packages.keys())

    def add(self, path: str = None, giturl: str = None, **kwargs):
        """Register and install a package from a local path OR a git URL (exactly one).

        Args:
            path (str, optional): local package directory.
            giturl (str, optional): URL of the form
                scheme://host/org/repo/tree/branch/package_path — TODO confirm
                the expected URL layout against callers.
            **kwargs: forwarded to the package's install() hook.

        Returns:
            dict: {package_name: registered package info}

        Raises:
            j.exceptions.Value: if neither or both of path/giturl are given,
                or the giturl path does not split into the expected 5 parts.
        """
        # TODO: Check if package already exists
        if not any([path, giturl]) or all([path, giturl]):
            raise j.exceptions.Value("either path or giturl is required")

        # NOTE(review): this loop only builds Package objects and discards them;
        # the duplicate checks it supported are commented out below
        for package_name in self.packages:
            package = self.get(package_name)
            ## TODO: why do we care if the path is the same and giturl is the same? adding it 100 times should just add it once?
            # if path and path == package.path:
            #     raise j.exceptions.Value("Package with the same path already exists")
            # if giturl and giturl == package.giturl:
            #     raise j.exceptions.Value("Package with the same giturl already exists")

        if giturl:
            url = urlparse(giturl)
            url_parts = url.path.lstrip("/").split("/", 4)

            if len(url_parts) != 5:
                raise j.exceptions.Value("invalid path")

            org, repo, _, branch, package_path = url_parts
            repo_dir = f"{org}_{repo}_{branch}"
            repo_path = j.sals.fs.join_paths(DOWNLOADED_PACKAGES_PATH,
                                             repo_dir)
            repo_url = f"{url.scheme}://{url.hostname}/{org}/{repo}"

            # delete repo dir if exists
            j.sals.fs.rmtree(repo_path)

            j.tools.git.clone_repo(url=repo_url,
                                   dest=repo_path,
                                   branch_or_tag=branch)
            path = j.sals.fs.join_paths(repo_path, repo, package_path)

        package = Package(path=path,
                          default_domain=self.threebot.domain,
                          default_email=self.threebot.email,
                          giturl=giturl)

        # TODO: adding under the same name if same path and same giturl should be fine, no?
        # if package.name in self.packages:
        #     raise j.exceptions.Value(f"Package with name {package.name} already exists")

        self.packages[package.name] = {
            "name": package.name,
            "path": package.path,
            "giturl": package.giturl
        }

        # execute package install method
        package.install(**kwargs)

        # install package if threebot is started
        if self.threebot.started:
            self.install(package)
            self.threebot.nginx.reload()
        self.save()

        # Return updated package info
        return {package.name: self.packages[package.name]}

    def delete(self, package_name):
        """Unregister a package and tear down its servers, actors and chats.

        Args:
            package_name (str): name of a registered, non-default package.

        Raises:
            j.exceptions.Value: when trying to delete a default package.
            j.exceptions.NotFound: when the package is not registered.
        """
        if package_name in DEFAULT_PACKAGES:
            raise j.exceptions.Value("cannot delete default packages")
        package = self.get(package_name)
        if not package:
            raise j.exceptions.NotFound(f"{package_name} package not found")

        # remove bottle servers
        rack_servers = list(self.threebot.rack._servers)
        for bottle_server in rack_servers:
            if bottle_server.startswith(f"{package_name}_"):
                self.threebot.rack.remove(bottle_server)

        if self.threebot.started:
            # unregister gedis actors
            gedis_actors = list(self.threebot.gedis._loaded_actors.keys())
            for actor in gedis_actors:
                if actor.startswith(f"{package_name}_"):
                    self.threebot.gedis._system_actor.unregister_actor(actor)

            # unload chats
            try:
                if package.chats_dir:
                    self.threebot.chatbot.unload(package.chats_dir)
            except Exception as e:
                j.logger.warning(
                    f"Couldn't unload the chats of package {package_name}, this is the the exception {str(e)}"
                )

            # reload nginx
            self.threebot.nginx.reload()

        # execute package uninstall method
        package.uninstall()

        self.packages.pop(package_name)
        self.save()

    def install(self, package):
        """install and apply package configurations

        Args:
            package ([package object]): get package object using [self.get(package_name)]

        Returns:
            [dict]: [package info]
        """
        sys.path.append(package.path + "/../")  # TODO to be changed
        package.preinstall()
        for static_dir in package.static_dirs:
            path = package.resolve_staticdir_location(static_dir)
            if not j.sals.fs.exists(path):
                raise j.exceptions.NotFound(f"Cannot find static dir {path}")

        # add bottle servers
        for bottle_server in package.bottle_servers:
            path = j.sals.fs.join_paths(package.path,
                                        bottle_server["file_path"])
            if not j.sals.fs.exists(path):
                raise j.exceptions.NotFound(
                    f"Cannot find bottle server path {path}")

            bottle_app = package.get_bottle_server(path, bottle_server["host"],
                                                   bottle_server["port"])
            self.threebot.rack.add(f"{package.name}_{bottle_server['name']}",
                                   bottle_app)

        # register gedis actors
        if package.actors_dir:
            for actor in package.actors:
                self.threebot.gedis._system_actor.register_actor(
                    actor["name"], actor["path"])

        # add chatflows actors
        if package.chats_dir:
            self.threebot.chatbot.load(package.chats_dir)
        # start servers
        self.threebot.rack.start()

        # apply nginx configuration
        package.nginx_config.apply()

        # execute package start method
        package.start()
        self.threebot.gedis_http.client.reload()
        self.threebot.nginx.reload()

    def reload(self, package_name):
        """Re-run install for a registered package on a running threebot.

        Args:
            package_name (str): name of a registered package.

        Raises:
            j.exceptions.NotFound: when the package is not registered.
            j.exceptions.Runtime: when the threebot server is not started.
        """
        if self.threebot.started:
            package = self.get(package_name)
            if not package:
                raise j.exceptions.NotFound(
                    f"{package_name} package not found")
            self.install(package)
            self.threebot.nginx.reload()
            self.save()
        else:
            raise j.exceptions.Runtime(
                "Can't reload package. Threebot server is not started")

        # Return updated package info
        return {package.name: self.packages[package.name]}

    def _install_all(self):
        """Install and apply all the packages configurations
        This method shall not be called directly from the shell,
        it must be called only from the code on the running Gedis server
        """
        all_packages = self.list_all()
        for package in all_packages:
            if package not in DEFAULT_PACKAGES:
                j.logger.info(f"Configuring package {package}")
                self.install(self.get(package))
# Example 19
class StartupCmd(Base):
    """A managed long-running command: start/stop it (foreground or tmux) and
    locate its process by marker name, ports, or process-string filters."""

    start_cmd = fields.String()  # command to launch
    ports = fields.List(fields.Integer())  # ports used to locate the process
    executor = fields.Enum(Executor)  # FOREGROUND or TMUX
    check_cmd = fields.String()  # optional command whose exit code reports "running"
    path = fields.String(default=j.core.dirs.TMPDIR)  # working directory for start
    stop_cmd = fields.String()  # optional template command used for soft kill
    env = fields.Typed(dict, default={})  # environment exported before start
    timeout = fields.Integer(default=60)  # default wait timeout in seconds
    process_strings = fields.List(fields.String())  # substrings used to find pids
    process_strings_regex = fields.List(fields.String())  # regexes used to find pids

    def __init__(self):
        super().__init__()
        # cached process state; reset() clears it
        self._process = None
        self._pid = None
        self._cmd_path = None
        self.__tmux_window = None

    def reset(self):
        """Clear the cached process/pid so they are re-resolved on next access."""
        self._process = None
        self._pid = None

    @property
    def pid(self):
        """Pid found via the `startupcmd_<instance_name>` marker, cached."""
        if not self._pid:
            pids = j.sals.process.get_pids(f"startupcmd_{self.instance_name}")
            if pids:
                self._pid = pids[0]
        return self._pid

    @property
    def cmd_path(self):
        """Path of the generated start script (directories created on first access)."""
        if not self._cmd_path:
            self._cmd_path = j.sals.fs.join_paths(j.core.dirs.VARDIR, "cmds",
                                                  f"{self.instance_name}.sh")
            j.sals.fs.mkdirs(j.sals.fs.dirname(self._cmd_path))
        return self._cmd_path

    @pid.setter
    def pid(self, pid):
        self._pid = pid

    @property
    def process(self):
        """Process object for this command, resolved from pid or port/filters, cached."""
        if not self._process:
            if self.pid:
                self._process = j.sals.process.get_process_object(self.pid,
                                                                  die=False)
                if not self._process:
                    # stale pid: drop it so the next lookup starts fresh
                    self.pid = None
            else:
                processes = self._get_processes_by_port_or_filter()
                # only trust the result when it is unambiguous
                if len(processes) == 1:
                    self._process = processes[0]
        return self._process

    @property
    def _tmux_window(self):
        """Tmux window for this command (only when executor is TMUX), cached."""
        if self.executor == Executor.TMUX:
            if self.__tmux_window is None:
                self.__tmux_window = j.core.executors.tmux.get_js_window(
                    self.instance_name)
        return self.__tmux_window

    def _get_processes_by_port_or_filter(self):
        """Uses object properties to find the corresponding process(es)

        Returns:
            list: All processes that matched
        """

        pids_done = []
        result = []

        def _add_to_result(process):
            # de-duplicate by pid across the three lookup strategies
            if process and process.pid not in pids_done:
                result.append(process)
                pids_done.append(process.pid)

        for port in self.ports:
            try:
                process = j.sals.process.get_process_by_port(port)
            except Exception:
                continue

            _add_to_result(process)

        for process_string in self.process_strings:
            for pid in j.sals.process.get_filtered_pids(process_string):
                process = j.sals.process.get_process_object(pid, die=False)
                _add_to_result(process)

        for pid in j.sals.process.get_pids_filtered_by_regex(
                self.process_strings_regex):
            process = j.sals.process.get_process_object(pid, die=False)
            _add_to_result(process)

        #  We return all processes which match
        return result

    def _kill_processes_by_port_or_filter(self):
        """Kills processes that match object properties
        """
        processes = self._get_processes_by_port_or_filter()
        self._kill_processes(processes)

    def _kill_processes(self, processes):
        """Kill processes

        Args:
            processes (list): List of processes
        """
        for process in processes:
            try:
                process.kill()
            except NoSuchProcess:
                pass  # already killed

    def _soft_kill(self):
        """Kills the process using `stop_cmd`

        Returns:
            bool: True if it was killed
        """
        if self.stop_cmd:
            # stop_cmd is a template rendered against this object's data
            cmd = j.tools.jinja2.render_template(template_text=self.stop_cmd,
                                                 args=self._get_data())
            exit_code, _, _ = j.sals.process.execute(cmd, die=False)
            self.reset()
            return exit_code == 0
        return False

    def _hard_kill(self):
        """Force Kills the process
        """
        if self.process:
            self._kill_processes([self.process])
            self.reset()

        self._kill_processes_by_port_or_filter()

        if self.executor == Executor.TMUX:
            self._tmux_window.kill_window()
            self.__tmux_window = None

    def stop(self, force=True, wait_for_stop=True, die=True, timeout=None):
        """Stops the running command

        Args:
            force (bool, optional): If True will force kill the process. Defaults to True.
            wait_for_stop (bool, optional): If True will wait until process is stopped. Defaults to True.
            die (bool, optional): If True will raise if timeout is exceeded for stop. Defaults to True.
            timeout (int, optional): Timeout for stop wait. If not set will use `timeout` property. Defaults to None.
        """

        timeout = timeout or self.timeout

        if self.is_running():
            self._soft_kill()

        if force:
            self._hard_kill()

        if wait_for_stop:
            self.wait_for_stop(die=die, timeout=timeout)
        # remove the generated start script (best effort)
        j.sals.process.execute(f"rm {self.cmd_path}", die=False)

    def is_running(self):
        """Checks if startup cmd is running. Will use `check_cmd` property if defined or check based on object properties

        Returns:
            bool: True if it is running
        """
        if self.check_cmd:
            exit_code, _, _ = j.sals.process.execute(self.check_cmd, die=False)
            return exit_code == 0

        self.reset()
        if self.process:
            return self.process.is_running()
        return self._get_processes_by_port_or_filter() != []

    def _wait(self, for_running, die, timeout):
        """Wait for either start or stop to finish

        Args:
            for_running (bool): Whether to check if it is running or stopped.
            die (bool, optional): If True will raise if timeout is exceeded for stop. Defaults to True.
            timeout (int, optional): Timeout for wait operation in seconds.

        Raises:
            j.exceptions.Timeout: If timeout is exceeded.
        """
        end = j.data.time.now().timestamp + timeout

        # poll until the desired state is reached or the deadline passes
        while j.data.time.now().timestamp < end:
            if self.is_running() == for_running:
                break
            time.sleep(0.05)
        else:
            if die:
                raise j.exceptions.Timeout(
                    f"Wait operation exceeded timeout: {timeout}")

    def wait_for_stop(self, die=True, timeout=10):
        """Wait for stop to finish

        Args:
            die (bool, optional): If True will raise if timeout is exceeded for stop. Defaults to True.
            timeout (int, optional): Timeout for wait operation. Defaults to 10.

        Raises:
            j.exceptions.Timeout: If timeout is exceeded.
        """
        self._wait(False, die, timeout)

    def wait_for_running(self, die=True, timeout=10):
        """Wait for start to finish

        Args:
            die (bool, optional): If True will raise if timeout is exceeded for stop. Defaults to True.
            timeout (int, optional): Timeout for wait operation. Defaults to 10.

        Raises:
            j.exceptions.Timeout: If timeout is exceeded.
        """
        self._wait(True, die, timeout)

    def start(self):
        """Starts the process
        """
        if self.is_running():
            return

        if not self.start_cmd:
            raise j.exceptions.Value("please make sure start_cmd has been set")

        # multi-line commands are executed as-is; single-line commands are
        # wrapped in a script that exports env and tags the process name
        if "\n" in self.start_cmd.strip():
            command = self.start_cmd
        else:
            template_script = """
            set +ex
            {% for key,val in env.items() %}
            export {{key}}='{{val}}'
            {% endfor %}

            mkdir -p {{path}}
            cd {{path}}
            bash -c \"exec -a startupcmd_{{name}} {{start_cmd}}\"

            """

            script = j.tools.jinja2.render_template(
                template_text=template_script,
                env=self.env,
                path=self.path,
                start_cmd=self.start_cmd,
                name=self.instance_name,
            )
            j.sals.fs.write_file(self.cmd_path, script)
            j.sals.fs.chmod(self.cmd_path, 0o770)
            command = f"sh {self.cmd_path}"

        if self.executor == Executor.FOREGROUND:
            j.sals.process.execute(command)
        elif self.executor == Executor.TMUX:
            self._tmux_window.attached_pane.send_keys(command)

        self.wait_for_running(die=True, timeout=self.timeout)
# Example 20
class User(Base):
    emails = fields.List(fields.String())
    permissions = fields.List(fields.Object(Permission))
    custom_config = fields.Typed(dict)
    type = fields.Enum(UserType)
    password = fields.Secret()