Example #1
 def initialize(self, io_loop, defaults=None):
     self.io_loop = io_loop
     self.key_func = get_key_func(getattr(options, 'key_func', None))
     self.default_timeout = getattr(options, 'cache_time', 300)
     self.version = getattr(options, 'version', 1)
     self.key_prefix = getattr(options, 'key_prefix', 'cache')
     self.defaults = dict()
     self._lock = Lock()
     if defaults is not None:
         self.defaults.update(defaults)
Example #2
 def __init__(self, config, paneldue):
     self.ioloop = IOLoop.current()
     self.paneldue = paneldue
     self.port = config.get('serial')
     self.baud = config.getint('baud', 57600)
     self.sendlock = Lock()
     self.partial_input = b""
     self.ser = self.fd = None
     self.connected = False
     self.ioloop.spawn_callback(self._connect)
Example #3
 def __init__(self, dataBaseFilePath, tableNames):
     self.state = stateStopped
     self.jobStoreNames = ["static", "custom", "temporary"]
     self.lock = Lock()
     self.__jobStores = {}
     self.__timeout = None
     self.__instances = defaultdict(lambda: 0)
     self.__pendingJobs = []
     for index, jobStoreName in enumerate(self.jobStoreNames):
         self.__jobStores[jobStoreName] = jobStore(dataBaseFilePath,
                                                   tableNames[index])
Example #4
    def __init__(self, pool):
        """
        Initializes PSQLConnPool

        Parameters
        ----------
        pool : momoko.Pool
            A connection pool to run SQL queries.
        """
        self._pool = pool
        self._connected = False
        self._lock = Lock()
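For context, here is a minimal sketch of how the lock created above might serialize access to the pool; the execute_serialized() method and the run_query callable are assumptions for illustration, not part of the original class or of momoko's API.

from tornado.locks import Lock


class PSQLConnPoolSketch:
    """Illustrative only: execute_serialized() and run_query are assumed."""

    def __init__(self, pool):
        self._pool = pool
        self._connected = False
        self._lock = Lock()

    async def execute_serialized(self, run_query):
        # Let only one coroutine at a time run a query through the pool.
        async with self._lock:
            return await run_query(self._pool)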
Example #5
 def __init__(self,
              config,
              default_port=None,
              default_user=None,
              default_password=None):
     super().__init__(config)
     self.client = AsyncHTTPClient()
     self.request_mutex = Lock()
     self.addr = config.get("address")
     self.port = config.getint("port", default_port)
     self.user = config.get("user", default_user)
     self.password = config.get("password", default_password)
Example #6
 def __init__(self,
              config: ConfigHelper,
              default_port: int = -1,
              default_user: str = "",
              default_password: str = "",
              default_protocol: str = "http") -> None:
     super().__init__(config)
     self.client = AsyncHTTPClient()
     self.request_mutex = Lock()
     self.addr: str = config.get("address")
     self.port = config.getint("port", default_port)
     self.user = config.get("user", default_user)
     self.password = config.get("password", default_password)
     self.protocol = config.get("protocol", default_protocol)
Example #7
    def get(self, key):
        try:
            self._lock.acquire()
            if key not in self:
                self._session_locks[key] = Lock()
                self[key] = {}
        finally:
            self._lock.release()

        try:
            self._session_locks[key].acquire()
            yield self[key]
        finally:
            self._session_locks[key].release()
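The method above creates a per-session lock under a global lock, then holds only that session's lock while yielding its data. Below is a minimal sketch of the same idea written with tornado.locks.Lock and an async context manager; the SessionStoreSketch class is an assumption, not the original code.

from contextlib import asynccontextmanager

from tornado.locks import Lock


class SessionStoreSketch(dict):
    """Hypothetical stand-in for the session container above."""

    def __init__(self):
        super().__init__()
        self._lock = Lock()
        self._session_locks = {}

    @asynccontextmanager
    async def get(self, key):
        # Create the per-session lock and entry while holding the global lock.
        async with self._lock:
            if key not in self:
                self._session_locks[key] = Lock()
                self[key] = {}
        # Hold only that session's lock while the caller works with its data.
        async with self._session_locks[key]:
            yield self[key]

A caller would then write `async with store.get("session-id") as data: ...`, mirroring the `yield self[key]` in the original generator.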
Example #8
    def __init__(self, host, port, stream=None, io_loop=None, ssl_options=None,
                 read_timeout=DEFAULT_READ_TIMEOUT):
        self.host = host
        self.port = port
        self.io_loop = io_loop
        self.read_timeout = read_timeout
        self.is_queuing_reads = False
        self.read_queue = []
        self.__wbuf = BytesIO()
        self._read_lock = Lock()
        self.ssl_options = ssl_options

        # servers provide a ready-to-go stream
        self.stream = stream
        if self.stream is not None:
            self._set_close_callback()
Example #9
 def __init__(self, conn, stream_id, delegate, context=None):
     self.conn = conn
     self.stream_id = stream_id
     self.set_delegate(delegate)
     self.context = context
     self.finish_future = Future()
     self.write_lock = Lock()
     from tornado.util import ObjectDict
     # TODO: remove
     self.stream = ObjectDict(io_loop=IOLoop.current(),
                              close=conn.stream.close)
     self._incoming_content_remaining = None
     self._outgoing_content_remaining = None
     self._delegate_started = False
     self.window = Window(
         conn.window, stream_id,
         conn.setting(constants.Setting.INITIAL_WINDOW_SIZE))
     self._header_frames = []
     self._phase = constants.HTTPPhase.HEADERS
Example #10
 def __init__(self,
              factory: ShellCommandFactory,
              cmd: str,
              std_out_callback: OutputCallback,
              std_err_callback: OutputCallback,
              env: Optional[Dict[str, str]] = None,
              log_stderr: bool = False,
              cwd: Optional[str] = None) -> None:
     self.factory = factory
     self.name = cmd
     self.std_out_cb = std_out_callback
     self.std_err_cb = std_err_callback
     cmd = os.path.expanduser(cmd)
     self.command = shlex.split(cmd)
     self.log_stderr = log_stderr
     self.env = env
     self.cwd = cwd
     self.proc: Optional[SCProcess] = None
     self.cancelled = False
     self.return_code: Optional[int] = None
     self.run_lock = Lock()
Example #11
 def __init__(self, unit_in_seconds, callback_process):
     """ Initialize a scheduler.
         :param unit_in_seconds: number of seconds to wait for each step.
         :param callback_process: callback to call on every task.
             Signature:
                 task_callback(task.data) -> bool
             If the callback returns True, the task is considered done and is removed from the scheduler.
             Otherwise, the task is rescheduled for another delay.
     """
     assert isinstance(unit_in_seconds, int) and unit_in_seconds > 0
     assert callable(callback_process)
     self.unit = unit_in_seconds
     self.current_time = 0
     self.callback_process = callback_process
     self.data_in_heap = PriorityDict()  # data => Deadline
     self.data_in_queue = {}  # type: dict{object, _Task}  # data => associated Task in queue
     self.tasks_queue = Queue()
     # Lock to modify this object safely inside one Tornado thread:
     # http://www.tornadoweb.org/en/stable/locks.html
     self.lock = Lock()
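A hedged sketch of the callback contract described in the docstring, with the deadline heap simplified to a plain dict; the step() method and the pending attribute are illustrative assumptions rather than part of the original class.

from tornado.locks import Lock


class SchedulerSketch:
    """Illustrative only: the real class keeps a PriorityDict and a task Queue."""

    def __init__(self, unit_in_seconds, callback_process):
        self.unit = unit_in_seconds
        self.callback_process = callback_process
        self.pending = {}      # data => remaining delay (simplified)
        self.lock = Lock()

    async def step(self):
        # Shared state is only touched while the Tornado lock is held.
        async with self.lock:
            for data in list(self.pending):
                if self.callback_process(data):
                    del self.pending[data]          # done: drop the task
                else:
                    self.pending[data] = self.unit  # reschedule for another delay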
Example #12
    def __init__(self, cmd_helper, git_path, alias):
        self.server = cmd_helper.get_server()
        self.cmd_helper = cmd_helper
        self.alias = alias
        self.git_path = git_path
        self.git_cmd = f"git -C {git_path}"
        self.valid_git_repo = False
        self.git_owner = "?"
        self.git_remote = "?"
        self.git_branch = "?"
        self.current_version = "?"
        self.upstream_version = "?"
        self.current_commit = "?"
        self.upstream_commit = "?"
        self.upstream_url = "?"
        self.branches = []
        self.dirty = False
        self.head_detached = False

        self.init_condition = None
        self.git_operation_lock = Lock()
        self.fetch_timeout_handle = None
        self.fetch_input_recd = False
Example #13
    def __init__(self, name, endpoints, io_loop=None):
        if io_loop:
            warnings.warn('io_loop argument is deprecated.',
                          DeprecationWarning)
        # If this is not the main thread and no current IOLoop exists here,
        # IOLoop.instance() becomes self.io_loop.
        self.io_loop = io_loop or IOLoop.current()
        # List of available endpoints to which the service resolves.
        # Looks like [["host1", port1], ["host2", port2]]
        self.endpoints = endpoints
        self.name = name
        self.id = generate_service_id(self)

        self.log = servicelog

        self.sessions = {}
        self.counter = itertools.count(1)
        self.api = {}

        self._lock = Lock()

        # wrap into separate class
        self.pipe = None
        self.address = None
        # on_close can be scheduled at any time,
        # even after we've already reconnected. To avoid
        # closing the wrong connection, each new pipe has its own
        # epoch, used as an id for on_close
        self.pipe_epoch = 0
        self.buffer = msgpack_unpacker()

        self._header_table = {
            'tx': CocaineHeaders(),
            'rx': CocaineHeaders(),
        }
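A minimal sketch of the epoch guard described in the comment above: every reconnect bumps pipe_epoch, and an on_close callback scheduled for an older pipe is ignored. Everything except pipe and pipe_epoch is an assumption for illustration.

class PipeEpochSketch:
    """Illustrative only: names other than pipe and pipe_epoch are assumed."""

    def __init__(self):
        self.pipe = None
        self.pipe_epoch = 0

    def reconnect(self, new_pipe):
        # Each new pipe bumps the epoch; the value is captured by the
        # close callback registered for that pipe.
        self.pipe_epoch += 1
        self.pipe = new_pipe
        return self.pipe_epoch

    def on_close(self, epoch):
        if epoch != self.pipe_epoch:
            return  # an old pipe closed; the current connection is still valid
        self.pipe = None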
Example #14
 def __init__(self, server):
     self.server = server
     self.file_paths = {}
     self.file_lists = {}
     self.gcode_metadata = {}
     self.metadata_lock = Lock()
     self.server.register_endpoint(
         "/server/files/list", "file_list", ['GET'],
         self._handle_filelist_request)
     self.server.register_endpoint(
         "/server/files/metadata", "file_metadata", ['GET'],
         self._handle_metadata_request)
     self.server.register_endpoint(
         "/server/files/directory", "directory", ['GET', 'POST', 'DELETE'],
         self._handle_directory_request)
     self.server.register_endpoint(
         "/server/files/move", "file_move", ['POST'],
         self._handle_file_move_copy)
     self.server.register_endpoint(
         "/server/files/copy", "file_copy", ['POST'],
         self._handle_file_move_copy)
     # Register APIs to handle file uploads
     self.server.register_upload_handler("/server/files/upload")
     self.server.register_upload_handler("/api/files/local")
Example #15
 def __init__(self, proto_io):
     self.lock = Lock()
     self.proto_io = proto_io
Example #16
 def __init__(self, config: ConfigHelper) -> None:
     super().__init__(config)
     self.request_mutex = Lock()
     self.addr: List[str] = config.get("address").split('/')
     self.port = config.getint("port", 9999)
Example #17
    def __init__(self, config):
        self.server = config.get_server()
        self.config = config
        self.config.read_supplemental_config(SUPPLEMENTAL_CFG_PATH)
        self.repo_debug = config.getboolean('enable_repo_debug', False)
        auto_refresh_enabled = config.getboolean('enable_auto_refresh', False)
        self.distro = config.get('distro', "debian").lower()
        if self.distro not in SUPPORTED_DISTROS:
            raise config.error(f"Unsupported distro: {self.distro}")
        if self.repo_debug:
            logging.warn("UPDATE MANAGER: REPO DEBUG ENABLED")
        env = sys.executable
        mooncfg = self.config[f"update_manager static {self.distro} moonraker"]
        self.updaters = {
            "system": PackageUpdater(self),
            "moonraker": GitUpdater(self, mooncfg, MOONRAKER_PATH, env)
        }
        self.current_update = None
        # TODO: Check for client config in [update_manager].  This is
        # deprecated and will be removed.
        client_repo = config.get("client_repo", None)
        if client_repo is not None:
            client_path = config.get("client_path")
            name = client_repo.split("/")[-1]
            self.updaters[name] = WebUpdater(self, {
                'repo': client_repo,
                'path': client_path
            })
        client_sections = self.config.get_prefix_sections(
            "update_manager client")
        for section in client_sections:
            cfg = self.config[section]
            name = section.split()[-1]
            if name in self.updaters:
                raise config.error("Client repo named %s already added" %
                                   (name, ))
            client_type = cfg.get("type")
            if client_type == "git_repo":
                self.updaters[name] = GitUpdater(self, cfg)
            elif client_type == "web":
                self.updaters[name] = WebUpdater(self, cfg)
            else:
                raise config.error("Invalid type '%s' for section [%s]" %
                                   (client_type, section))

        # GitHub API Rate Limit Tracking
        self.gh_rate_limit = None
        self.gh_limit_remaining = None
        self.gh_limit_reset_time = None
        self.gh_init_evt = Event()
        self.cmd_request_lock = Lock()
        self.is_refreshing = False

        # Auto Status Refresh
        self.last_auto_update_time = 0
        self.refresh_cb = None
        if auto_refresh_enabled:
            self.refresh_cb = PeriodicCallback(self._handle_auto_refresh,
                                               UPDATE_REFRESH_INTERVAL_MS)
            self.refresh_cb.start()

        AsyncHTTPClient.configure(None, defaults=dict(user_agent="Moonraker"))
        self.http_client = AsyncHTTPClient()

        self.server.register_endpoint("/machine/update/moonraker", ["POST"],
                                      self._handle_update_request)
        self.server.register_endpoint("/machine/update/klipper", ["POST"],
                                      self._handle_update_request)
        self.server.register_endpoint("/machine/update/system", ["POST"],
                                      self._handle_update_request)
        self.server.register_endpoint("/machine/update/client", ["POST"],
                                      self._handle_update_request)
        self.server.register_endpoint("/machine/update/status", ["GET"],
                                      self._handle_status_request)

        # Register Ready Event
        self.server.register_event_handler("server:klippy_identified",
                                           self._set_klipper_repo)
        # Initialize GitHub API Rate Limits and configured updaters
        IOLoop.current().spawn_callback(self._initalize_updaters,
                                        list(self.updaters.values()))
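A hedged sketch of how a request lock plus an initialization Event, like the cmd_request_lock and gh_init_evt created above, can serialize update commands and make handlers wait for the first rate-limit fetch; the class and method names here are assumptions, not the original update manager logic.

from tornado.locks import Event, Lock


class UpdateGateSketch:
    """Illustrative only: handle_update_request() and run_update are assumed."""

    def __init__(self):
        self.gh_init_evt = Event()
        self.cmd_request_lock = Lock()

    async def handle_update_request(self, run_update):
        # Wait until the rate-limit data has been initialized once...
        await self.gh_init_evt.wait()
        # ...then run update commands one at a time.
        async with self.cmd_request_lock:
            return await run_update()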
Example #18
 def __init__(self, server):
     self.server = server
     self.lock = Lock()
     self.metadata = {}
     self.script_response = None
Example #19
 def __init__(self, provided_max_sequence):
     super(TornadoPublishSequenceManager, self).__init__(provided_max_sequence)
     self._lock = Lock()
     self._ioloop = ioloop
Example #20
 def setUp(self):
     self.wrapper = fixtures_app.RubyPcsdWrapper(ruby_pcsd.SINATRA_REMOTE)
     self.https_server_manage = mock.MagicMock(
         spec_set=http_server.HttpsServerManage)
     self.lock = Lock()
     super().setUp()
Example #21
 def _write_lock_default(self):
     return Lock()
Example #22
 def __init__(self, socket_path, io_loop=None):
     super(IPCMessageSubscriber, self).__init__(socket_path,
                                                io_loop=io_loop)
     self._read_stream_future = None
     self._saved_data = []
     self._read_in_progress = Lock()
Example #23
def main():
    signal.signal(signal.SIGTERM, handle_signal)
    signal.signal(signal.SIGINT, handle_signal)

    Path(settings.pcsd_log_location).touch(mode=0o600, exist_ok=True)
    log.setup(settings.pcsd_log_location)

    env = prepare_env(os.environ, log.pcsd)
    if env.has_errors:
        raise SystemExit(1)

    if env.PCSD_DEBUG:
        log.enable_debug()

    sync_config_lock = Lock()
    ruby_pcsd_wrapper = ruby_pcsd.Wrapper(
        settings.pcsd_ruby_socket,
        debug=env.PCSD_DEBUG,
    )
    make_app = configure_app(
        session.Storage(env.PCSD_SESSION_LIFETIME),
        ruby_pcsd_wrapper,
        sync_config_lock,
        env.PCSD_STATIC_FILES_DIR,
        disable_gui=env.PCSD_DISABLE_GUI,
        debug=env.PCSD_DEV,
    )
    pcsd_ssl = ssl.PcsdSSL(
        server_name=socket.gethostname(),
        cert_location=settings.pcsd_cert_location,
        key_location=settings.pcsd_key_location,
        ssl_options=env.PCSD_SSL_OPTIONS,
        ssl_ciphers=env.PCSD_SSL_CIPHERS,
    )
    try:
        SignalInfo.server_manage = HttpsServerManage(
            make_app,
            port=env.PCSD_PORT,
            bind_addresses=env.PCSD_BIND_ADDR,
            ssl=pcsd_ssl,
        ).start()
    except socket.gaierror as e:
        log.pcsd.error(
            "Unable to bind to specific address(es), exiting: %s ", e
        )
        raise SystemExit(1) from e
    except OSError as e:
        log.pcsd.error("Unable to start pcsd daemon, exiting: %s ", e)
        raise SystemExit(1) from e
    except ssl.SSLCertKeyException as e:
        for error in e.args:
            log.pcsd.error(error)
        log.pcsd.error("Invalid SSL certificate and/or key, exiting")
        raise SystemExit(1) from e

    ioloop = IOLoop.current()
    ioloop.add_callback(sign_ioloop_started)
    if systemd.is_systemd() and env.NOTIFY_SOCKET:
        ioloop.add_callback(systemd.notify, env.NOTIFY_SOCKET)
    ioloop.add_callback(config_sync(sync_config_lock, ruby_pcsd_wrapper))
    ioloop.start()
Example #24
 def initialize(self, wechat_event_setting, **kwargs):
     super(EventSettingHandler, self).initialize(**kwargs)
     self.wechat_event_message_setting = wechat_event_setting
     self.setting_lock = Lock()
Example #25
 def __init__(self, config):
     super().__init__(config)
     self.request_mutex = Lock()
     self.addr = config.get("address").split('/')
     self.port = config.getint("port", 9999)
Example #26
from bokeh.models import ColumnDataSource, \
    ToolbarBox, Toolbar, SaveTool, WheelZoomTool
import gc
from bokeh.models.widgets import Div, \
    DatePicker, Select

from datetime import datetime, timedelta, date

import holoviews as hv
from holoviews import opts
from tornado.gen import coroutine
import numpy as np
import pandas as pd

from static.css.KPI_interface import KPI_card_css

lock = Lock()
executor = ThreadPoolExecutor()
logger = mylogger(__file__)

hv.extension('bokeh', logo=False)
renderer = hv.renderer('bokeh')


@coroutine
def KPI_projects_tab(panel_title, DAYS_TO_LOAD=90):
    timeline_source = ColumnDataSource(data=dict(
        Item=[], Start=[], End=[], Color=[], start=[], end=[], ID=[], ID1=[]))

    class Thistab(KPI):
        def __init__(self, table, cols=[]):
            KPI.__init__(self, table, name='project', cols=cols)
Example #27
 def __init__(self, **settings):
     super().__init__(**settings)
     self.lock = Lock()
     self.storage_state = self.__load_state_from_storage()
Example #28
class AdminHandler(UserBaseHandler):
    queues: Iterable[str] = []
    queueTimer = None
    queueTimerLock = Lock()

    def initialize(self, *args, **kwargs):
        super().initialize(*args, **kwargs)
        self.ADMIN_FUNCTIONS: Dict[str, Coroutine[None, None, str]] = {
            "addDeviceType": self.addDeviceType,
            "addDevice": self.addDevice,
            "addAdmin": self.addAdmin,
            "changeDevicePassword": self.changeDevicePassword,
            "rmDevice": self.rmDevice,
            "killSession": self.killSession,
            "toggleQueue": self.toggle_queue,
        }
        if not AdminHandler.queueTimer:
            ioloop.IOLoop.current().add_callback(self.startQueueUpdateTimer)

    @classmethod
    async def startQueueUpdateTimer(cls):
        async with cls.queueTimerLock:
            if cls.queueTimer is None:
                cls.queueTimer = Timer(cls.queueUpdate, timeout=300)
                await cls.queueUpdate()  # call it here, otherwise the first call is in 300 seconds

    @classmethod
    async def queueUpdate(cls):
        with make_session() as session:
            try:
                cls.queues = await as_future(
                    session.query(
                        DeviceType.id,
                        DeviceType.name,
                        func.count(UserQueue.userId),
                        DeviceType.enabled,
                    ).select_from(DeviceType).join(UserQueue,
                                                   isouter=True).group_by(
                                                       DeviceType.id,
                                                       DeviceType.name).all)
            except Exception:
                pass  # Uhhhhh... what do?

    @authenticated
    async def get(self):
        with self.make_session() as session:
            try:
                roles = await as_future(
                    session.query(Role.name).join(UserRoles).join(User).filter(
                        User.id == self.current_user).all)
                for role in roles:
                    if 'Admin' in role:
                        break
                else:
                    return self.redirect(self.reverse_url("main"))
            except Exception:
                return self.redirect(self.reverse_url("main"))

        return self.render("admin.html", queues=self.queues, messages=None)

    @authenticated
    async def post(self):
        with self.make_session() as session:
            try:
                roles = await as_future(
                    session.query(Role.name).join(UserRoles).join(User).filter(
                        User.id == self.current_user).all)
                for role in roles:
                    if 'Admin' in role:
                        break
                else:
                    return self.redirect(self.reverse_url("main"))
            except Exception:
                return self.redirect(self.reverse_url("main"))

        # Try and get the type parameter so we can decide what type of request this is
        try:
            req_type = self.get_argument("type")
        except MissingArgumentError:
            return self.render("admin.html",
                               messages="Error in form submission")

        try:
            errors = await self.ADMIN_FUNCTIONS.get(
                req_type, (lambda: "Invalid function"))()
        except Exception as e:
            errors = str(e)
        finally:
            if errors:
                print("Invalid Admin function by user {}: {}".format(
                    self.current_user, req_type))
                return self.render("error.html", error=errors)

        return self.render("admin.html", queues=self.queues, messages=errors)

    async def addDeviceType(self) -> str:
        try:
            name = self.get_argument("name")
        except MissingArgumentError:
            return "Missing device type"

        with self.make_session() as session:
            await as_future(
                partial(session.add, DeviceType(name=name, enabled=1)))

        await self.queueUpdate()

        return ""

    async def addDevice(self) -> str:
        try:
            device_info = self.get_argument("device_info")
            device_type = self.get_argument("device_type")
        except MissingArgumentError:
            return "Missing device type or config file info"

        config = ConfigParser()
        try:
            config.read_string(device_info)
        except KeyError:
            return "Error while trying to read the config file"

        error_msg = ""
        with self.make_session() as session:
            device_type_id = (await as_future(
                session.query(DeviceType.id).filter_by(name=device_type).one
            ))[0]

            for section in config.sections():
                try:
                    session.add(
                        DeviceQueue(
                            name=config[section]["username"],
                            password=PasswordHasher.hash(
                                config[section]["password"]),
                            state="want-provision",
                            type=device_type_id,
                        ))
                except Exception as e:
                    error_msg += str(e) + "\n"
                    continue

        return error_msg

    async def addAdmin(self) -> str:
        if ctfd_db:
            return "Please use the CTFd system to manage admin roles"
        try:
            username = self.get_argument("username")
            password = self.get_argument("password")
        except MissingArgumentError:
            return "Missing username or password"

        with self.make_session() as session:
            try:
                admin = await as_future(
                    session.query(Role).filter_by(name="Admin").first)
                human = await as_future(
                    session.query(Role).filter_by(name="Human").first)
                device = await as_future(
                    session.query(Role).filter_by(name="Device").first)
            except Exception:
                return "Error while finding roles"

            try:
                await as_future(
                    partial(
                        session.add,
                        User(
                            name=username,
                            password=PasswordHasher.hash(password),
                            roles=[admin, human, device],
                        ),
                    ))
            except Exception:
                return "Error while attempting to add user"
        return ""

    async def changeDevicePassword(self) -> str:
        try:
            username = self.get_argument("username")
            password = self.get_argument("password")
        except MissingArgumentError:
            return "Missing username or password"

        with self.make_session() as session:
            try:
                device = await as_future(
                    session.query(DeviceQueue).filter_by(name=username).one)
                device.password = PasswordHasher.hash(password)
            except Exception:
                return "Error while updating password"

        return ""

    async def rmDevice(self) -> str:
        # TODO: is this safe to do? what are the implications if someone is connected?
        try:
            device = self.get_argument("device")
        except MissingArgumentError:
            return "Missing device"

        with self.make_session() as session:
            try:
                device = await as_future(
                    session.query(DeviceQueue).filter_by(name=device).one)
                await as_future(partial(session.delete, device))
            except Exception:
                return "Failed to remove device"
        return ""

    async def killSession(self) -> str:
        try:
            deviceName = self.get_argument("device")
        except MissingArgumentError:
            return "Missing Device name"

        with self.make_session() as session:
            try:
                deviceID = await as_future(
                    session.query(
                        DeviceQueue.id).filter_by(name=deviceName).one)
            except Exception:
                return "Error while looking up device"

        await DeviceStateHandler.killSession(deviceID[0])
        return ""

    async def toggle_queue(self) -> str:
        try:
            queueID = self.get_argument("queue")
        except MissingArgumentError:
            return "Missing Device name"

        with self.make_session() as session:
            try:
                queue = await as_future(
                    session.query(DeviceType).filter_by(id=queueID).one)
            except Exception:
                return "Failed to find that queue type"

            queue.enabled = 1 if not queue.enabled else 0

        await self.queueUpdate()

        return ""
Example #29
class SystemListAPI(BaseHandler):

    parser = BeerGardenSchemaParser()
    logger = logging.getLogger(__name__)

    REQUEST_FIELDS = set(SystemSchema.get_attribute_names())

    # Need to ensure that Systems are updated atomically
    system_lock = Lock()

    @authenticated(permissions=[Permissions.SYSTEM_READ])
    def get(self):
        """
        ---
        summary: Retrieve all Systems
        description: |
          This endpoint allows for querying Systems.

          There are several parameters that control which fields are returned
          and what information is available. Things to be aware of:

          * The `include_commands` parameter is __deprecated__. Don't use it.
            Use `exclude_fields=commands` instead.

          * It's possible to specify `include_fields` _and_ `exclude_fields`.
            This doesn't make a lot of sense, but you can do it. If the same
            field is in both, `exclude_fields` takes priority (the field will
            NOT be included in the response).

          Systems matching specific criteria can be filtered using additional
          query parameters. This is a very basic capability:

          * ?name=foo&version=1.0.0
            This will return the system named 'foo' with version '1.0.0'
          * ?name=foo&name=bar
            This will not do what you might expect: only the system named
            'bar' will be returned.
        parameters:
          - name: include_fields
            in: query
            required: false
            description: Specify fields to include in the response. All other
              fields will be excluded.
            type: array
            collectionFormat: csv
            items:
              type: string
          - name: exclude_fields
            in: query
            required: false
            description: Specify fields to exclude from the response
            type: array
            collectionFormat: csv
            items:
              type: string
          - name: dereference_nested
            in: query
            required: false
            description: Commands and instances will be an object id
            type: boolean
            default: true
          - name: include_commands
            in: query
            required: false
            description: __DEPRECATED__ Include commands in the response.
              Use `exclude_fields=commands` instead.
            type: boolean
            default: true
        responses:
          200:
            description: All Systems
            schema:
              type: array
              items:
                $ref: '#/definitions/System'
          50x:
            $ref: '#/definitions/50xError'
        tags:
          - Systems
        """
        query_set = System.objects.order_by(
            self.request.headers.get('order_by', 'name'))
        serialize_params = {'to_string': True, 'many': True}

        include_fields = self.get_query_argument('include_fields', None)
        exclude_fields = self.get_query_argument('exclude_fields', None)
        dereference_nested = self.get_query_argument('dereference_nested',
                                                     None)
        include_commands = self.get_query_argument('include_commands', None)

        if include_fields:
            include_fields = set(
                include_fields.split(',')) & self.REQUEST_FIELDS
            query_set = query_set.only(*include_fields)
            serialize_params['only'] = include_fields

        if exclude_fields:
            exclude_fields = set(
                exclude_fields.split(',')) & self.REQUEST_FIELDS
            query_set = query_set.exclude(*exclude_fields)
            serialize_params['exclude'] = exclude_fields

        if include_commands and include_commands.lower() == 'false':
            query_set = query_set.exclude('commands')

            if 'exclude' not in serialize_params:
                serialize_params['exclude'] = set()
            serialize_params['exclude'].add('commands')

        if dereference_nested and dereference_nested.lower() == 'false':
            query_set = query_set.no_dereference()

        # TODO - Handle multiple query arguments with the same key
        # for example: (?name=foo&name=bar) ... what should that mean?
        filter_params = {}

        # Need to use self.request.query_arguments to get all the query args
        for key in self.request.query_arguments:
            if key in self.REQUEST_FIELDS:
                # Now use get_query_argument to get the decoded value
                filter_params[key] = self.get_query_argument(key)

        result_set = query_set.filter(**filter_params)

        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.write(self.parser.serialize_system(result_set,
                                                **serialize_params))

    @coroutine
    @authenticated(permissions=[Permissions.SYSTEM_CREATE])
    def post(self):
        """
        ---
        summary: Create a new System or update an existing System
        description: If the System does not exist it will be created. If the System already exists
            it will be updated (assuming it passes validation).
        parameters:
          - name: system
            in: body
            description: The System definition to create / update
            schema:
              $ref: '#/definitions/System'
        responses:
          200:
            description: An existing System has been updated
            schema:
              $ref: '#/definitions/System'
          201:
            description: A new System has been created
            schema:
              $ref: '#/definitions/System'
          400:
            $ref: '#/definitions/400Error'
          50x:
            $ref: '#/definitions/50xError'
        tags:
          - Systems
        """
        self.request.event.name = Events.SYSTEM_CREATED.name

        system_model = self.parser.parse_system(self.request.decoded_body,
                                                from_string=True)

        with (yield self.system_lock.acquire()):
            # See if we already have a system with this name + version
            existing_system = System.find_unique(system_model.name,
                                                 system_model.version)

            if not existing_system:
                self.logger.debug("Creating a new system: %s" %
                                  system_model.name)
                saved_system, status_code = self._create_new_system(
                    system_model)
            else:
                self.logger.debug("System %s already exists. Updating it." %
                                  system_model.name)
                self.request.event.name = Events.SYSTEM_UPDATED.name
                saved_system, status_code = self._update_existing_system(
                    existing_system, system_model)

            saved_system.deep_save()

        self.request.event_extras = {'system': saved_system}

        self.set_status(status_code)
        self.write(
            self.parser.serialize_system(saved_system,
                                         to_string=False,
                                         include_commands=True))

    @staticmethod
    def _create_new_system(system_model):
        new_system = system_model

        # Assign a default instance if there aren't any instances and there can only be one
        if not new_system.instances or len(new_system.instances) == 0:
            if new_system.max_instances is None or new_system.max_instances == 1:
                new_system.instances = [Instance(name='default')]
                new_system.max_instances = 1
            else:
                raise ModelValidationError(
                    'Could not create system %s-%s: Systems with '
                    'max_instances > 1 must also define their instances' %
                    (system_model.name, system_model.version))
        else:
            if not new_system.max_instances:
                new_system.max_instances = len(new_system.instances)

        return new_system, 201

    @staticmethod
    def _update_existing_system(existing_system, system_model):
        # Raise an exception if commands already exist for this system and they differ from what's
        # already in the database in a significant way
        if existing_system.commands and 'dev' not in existing_system.version and \
                existing_system.has_different_commands(system_model.commands):
            raise ModelValidationError(
                'System %s-%s already exists with different commands' %
                (system_model.name, system_model.version))
        else:
            existing_system.upsert_commands(system_model.commands)

        # Update instances
        if not system_model.instances or len(system_model.instances) == 0:
            system_model.instances = [Instance(name='default')]

        for attr in ['description', 'icon_name', 'display_name']:
            value = getattr(system_model, attr)

            # If we set an attribute on the model as None, mongoengine marks the attribute for
            # deletion. We want to prevent this, so we set it to an empty string.
            if value is None:
                setattr(existing_system, attr, "")
            else:
                setattr(existing_system, attr, value)

        # Update metadata
        new_metadata = system_model.metadata or {}
        existing_system.metadata.update(new_metadata)

        old_instances = [
            inst for inst in existing_system.instances
            if system_model.has_instance(inst.name)
        ]
        new_instances = [
            inst for inst in system_model.instances
            if not existing_system.has_instance(inst.name)
        ]
        existing_system.instances = old_instances + new_instances

        return existing_system, 200
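The post handler above acquires the class-level lock with the older coroutine style, `with (yield self.system_lock.acquire()):`. Below is a minimal sketch of the same atomic-update idea in the modern async/await form; the class name and the method body are placeholders, not the original handler logic.

from tornado.locks import Lock


class SystemListAPISketch:
    # Class-level lock, as above, so concurrent POSTs update Systems atomically.
    system_lock = Lock()

    async def post(self):
        async with self.system_lock:
            # critical section: find the existing System, create or update it,
            # and deep_save() before the lock is released
            ...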
Example #30
 def configure(cls, backend, thread_pool):
     cls.CLIENT = AsyncHTTPClient(io_loop=IOLoop.current())
     cls.BACKEND = backend
     cls.THREAD_POOL = thread_pool
     cls.XMLRPC = ServerProxy(str(copy(backend)(path="/pypi")), )
     cls.LOCK = Lock()