Example 1
    def start(self, detach=False, profile=False, loop=True):
        msg = 'Start database %s %s %s' % (detach, profile, loop)
        log_info(msg)
        self.profile = '{0}/log/profile'.format(self.target) if profile else None
        # Daemon mode
        if detach:
            become_daemon()

        # Find out the IP to listen to
        address = self.config.get_value('listen-address').strip()
        if not address:
            raise ValueError('listen-address is missing from config.conf')
        if address == '*':
            address = None

        # Check port
        if self.port is None:
            raise ValueError('listen-port is missing from config.conf')

        # Save PID
        pid = getpid()
        with open(self.target + '/pid', 'w') as f:
            f.write(str(pid))
        # Call method on root at start
        with self.database.init_context() as context:
            context.root.launch_at_start(context)
        # Listen & set context
        if not self.read_only:
            self.launch_cron()
        self.listen(address, self.port)

        # XXX The interpreter does not reach this point
        #self.server.root.launch_at_stop(context)
        ## Ok
        return True
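A minimal usage sketch (not part of the original source), assuming the Server class and the start() signature shown in the later examples; the target path and the import path are illustrative:

# Hypothetical usage sketch: create an instance and start it.
from ikaaro.server import Server   # assumed import path

server = Server('/var/www/my-instance', read_only=False)
if server.check_consistency(quick=True):
    server.start(detach=True, profile=False, loop=True)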
Example 2
 def log_access(self, host, request_line, status_code, body_length):
     if host:
         host = host.split(',', 1)[0].strip()
     now = strftime('%d/%b/%Y:%H:%M:%S')
     message = '%s - - [%s] "%s" %d %d\n' % (host, now, request_line,
                                             status_code, body_length)
     log_info(message, domain='itools.web_access')
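For reference, the format string used above produces a line similar to the Common Log Format; a standalone illustration with made-up values:

# Standalone illustration of the line built by log_access() above.
from time import strftime

host, request_line, status_code, body_length = '127.0.0.1', 'GET / HTTP/1.1', 200, 512
now = strftime('%d/%b/%Y:%H:%M:%S')
message = '%s - - [%s] "%s" %d %d\n' % (host, now, request_line,
                                        status_code, body_length)
print(message)
# e.g. 127.0.0.1 - - [01/Jan/2024:12:00:00] "GET / HTTP/1.1" 200 512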
Example 3
    def load(self):
        path = expanduser('~/.usine')
        if lfs.is_file(path):
            log_fatal('ERROR: %s is a file, remove it first' % path)

        # Make the user configuration folder if needed
        if not lfs.exists(path):
            log_info('Making the configuration folder: {}'.format(path))
            lfs.make_folder(path)
            log_fatal('Now add the INI files within the folder')

        # Find the INI files in the configuration folder
        ini = [ '%s/%s' % (path, x)
                for x in lfs.get_names(path) if x[-4:] == '.ini' ]
        if len(ini) == 0:
            log_fatal('ERROR: zero INI files found in {}/'.format(path))

        # Read the ini file
        cfg = RawConfigParser()
        cfg.read(ini)

        # Get the data
        for section in cfg._sections:
            options = cfg._sections[section]
            type, name = section.split()
            module = modules[type]
            obj = module(options)

            # Keep the data unit
            self.by_type.setdefault(type, []).append(obj)
            self.by_type_and_name[(type, name)] = obj

        # Sort
        for type in self.by_type:
            self.by_type[type].sort(key=lambda x: x.name)
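The section.split() call above implies that every section header in the INI files is of the form "<type> <name>". A minimal sketch of that convention (Python 2, matching the examples; the section content is made up, the option names follow the action_test example further down):

# Sketch of the "[<type> <name>]" section convention that load() relies on.
from ConfigParser import RawConfigParser
from StringIO import StringIO

cfg = RawConfigParser()
cfg.readfp(StringIO('[ikaaro blog]\nuri = http://localhost:8080\npyenv = py27\n'))
for section in cfg.sections():
    type, name = section.split()     # -> ('ikaaro', 'blog')
    print('%s %s: %s' % (type, name, dict(cfg.items(section))))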
Example 4
    def _smtp_send(self):
        nb_max_mails_to_send = 2
        spool = lfs.open(self.spool)

        def get_names():
            # Find out emails to send
            locks = set()
            names = set()
            for name in spool.get_names():
                if name == 'failed':
                    # Skip "failed" special directory
                    continue
                if name[-5:] == '.lock':
                    locks.add(name[:-5])
                else:
                    names.add(name)
            names.difference_update(locks)
            return names

        # Send emails
        names = get_names()
        smtp_host = self.smtp_host
        for name in list(names)[:nb_max_mails_to_send]:
            # 1. Open connection
            try:
                smtp = SMTP(smtp_host)
            except Exception:
                self.smtp_log_error()
                spool.move(name, 'failed/%s' % name)
                return 60 if get_names() else False
            log_info('CONNECTED to %s' % smtp_host)

            # 2. Login
            if self.smtp_login and self.smtp_password:
                smtp.login(self.smtp_login, self.smtp_password)

            # 3. Send message
            try:
                message = spool.open(name).read()
                headers = HeaderParser().parsestr(message)
                subject = headers['subject']
                from_addr = headers['from']
                to_addr = headers['to']
                smtp.sendmail(from_addr, to_addr, message)
                # Remove
                spool.remove(name)
                # Log
                log_msg = 'Email "%s" sent from "%s" to "%s"'
                log_info(log_msg % (subject, from_addr, to_addr))
            except SMTPRecipientsRefused:
                # The recipient addresses have been refused
                self.smtp_log_error()
                spool.move(name, 'failed/%s' % name)
            except SMTPResponseException as excp:
                # The SMTP server returned an error code
                self.smtp_log_error()
                spool.move(name, 'failed/%s_%s' % (excp.smtp_code, name))
            except Exception:
                self.smtp_log_error()
Example 5
 def index_document(self, document):
     self.nb_changes += 1
     abspath, term, xdoc = self.get_xdoc_from_document(document)
     self._db.replace_document(term, xdoc)
     if self.logger:
         log_info(abspath, domain='itools.catalog')
     if self.root:
         self.transaction_abspaths.append(abspath)
Example 6
 def stop(self, force=False):
     msg = 'Stopping server...'
     log_info(msg)
     print(msg)
     # Stop wsgi server
     if self.wsgi_server:
         self.wsgi_server.stop()
     # Close database
     self.close()
Example 7
 def unindex_document(self, abspath):
     """Remove the document that has value stored in its abspath.
        If the document does not exist => no error
     """
     self.nb_changes += 1
     data = _reduce_size(_encode(self._fields['abspath'], abspath))
     self._db.delete_document('Q' + data)
     if self.logger:
         log_info(abspath, domain='itools.catalog')
Example 8
 def wrapper(*args, **kwargs):
     # Strip the 'action_' prefix from the function name
     func_name = func.__name__.split('action_')[1]
     start_dtime = datetime.now()
     log_info('Start {} ({})'.format(func_name, start_dtime))
     # Call the wrapped function
     func(*args, **kwargs)
     duration = datetime.now() - start_dtime
     log_info('End {} (duration : {})'.format(func_name, duration))
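The wrapper above only makes sense inside a decorator that closes over func. A minimal sketch of such a decorator (not the original source; the name log_duration is hypothetical and log_info is assumed to come from itools.log):

# Hypothetical decorator wrapping action_* methods with the timing wrapper above.
from datetime import datetime
from functools import wraps

from itools.log import log_info   # assumed import


def log_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Strip the 'action_' prefix from the function name
        func_name = func.__name__.split('action_')[1]
        start_dtime = datetime.now()
        log_info('Start {} ({})'.format(func_name, start_dtime))
        # Call the wrapped function and keep its result
        result = func(*args, **kwargs)
        duration = datetime.now() - start_dtime
        log_info('End {} (duration: {})'.format(func_name, duration))
        return result
    return wrapper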
Example 9
 def action_test(self):
     """ Test if ikaaro instances of this Python environment are alive"""
     for ikaaro in config.get_sections_by_type('ikaaro'):
         if ikaaro.options['pyenv'] == self.name:
             uri = ikaaro.options['uri']
             for i in range(1, 6):
                 try:
                     lfs.open('{}/;_ctrl'.format(uri))
                 except Exception:
                     log_error('[ERROR {}/5] {}'.format(i, uri))
                     sleep(0.5)
                 else:
                     log_info('[OK] {}'.format(uri))
                     break
Example 10
 def check_consistency(self, quick):
     log_info('Check database consistency')
     # Check the server is not running
     if self.read_only:
         pid = get_pid('%s/pid_ro' % self.target)
     else:
         pid = get_pid('%s/pid' % self.target)
     if pid is not None:
         msg = '[%s] The Web Server is already running.' % self.target
         log_warning(msg)
         print(msg)
         return False
     # Ok
     return True
Example 11
    def action_install(self):
        """Installs every required package (and dependencies) into the remote virtual
        environment.
        """
        # Get host
        host = self.get_host()
        # Build commands
        install_command = '%s setup.py --quiet install --force' % self.bin_python
        pip_install_command = '%s install -r requirements.txt --upgrade' % self.bin_pip
        prefix = self.options.get('prefix')
        if prefix:
            pip_install_command += ' --prefix=%s' % prefix
            install_command += ' --prefix=%s' % prefix

        # Get package install paths
        paths = {}
        for name, version in self.get_packages():
            source = self.get_source(name)
            if self.is_local:
                source_path = source.get_path()
                paths[name] = source_path
            else:
                # If remote, we need to untar the sources
                log_info('UNTAR sources for {}'.format(name))
                pkgname = source.get_pkgname()
                host.run('tar xzf %s.tar.gz' % pkgname, '/tmp')
                source_path = '/tmp/%s' % pkgname
                paths[name] = source_path

        # Install
        for name, path in paths.iteritems():
            try:
                log_info('INSTALL DEPENDENCIES for {}'.format(name))
                host.run(pip_install_command, path)
            except EnvironmentError:
                # In case there is no requirements.txt
                log_info('No file requirements.txt found, ignore')
            # Install
            log_info('INSTALL package {}'.format(name))
            host.run(install_command, path)

            if not self.is_local:
                # Clean untar sources
                log_info('DELETE untar sources {}'.format(path))
                host.run('rm -rf %s' % path, '/tmp')
Example 12
 def cron_manager(self):
     database = self.database
     error = False
     # Build fake context
     with database.init_context() as context:
         context.is_cron = True
         context.git_message = u'[CRON]'
         # Go
         t0 = time()
         catalog = database.catalog
         query = RangeQuery('next_time_event', None, context.timestamp)
         search = database.search(query)
         if not search:
             return self.config.get_value('cron-interval')
         nb = len(search)
         msg = 'Cron launched for {nb} resources'.format(nb=nb)
         log_info(msg, domain='itools.cron')
         for brain in search.get_documents():
             tcron0 = time()
             payload = brain.next_time_event_payload
             if payload:
                 payload = pickle.loads(payload)
             resource = database.get_resource(brain.abspath)
             try:
                 resource.time_event(payload)
             except Exception:
                 # Log error
                 log_error('Cron error\n' + format_exc())
                 context.root.alert_on_internal_server_error(context)
                 # Abort changes
                 database.abort_changes()
                 # With error
                 error = True
                 break
             # Reindex resource without committing
             values = resource.get_catalog_values()
             catalog.index_document(values)
             catalog.save_changes()
             # Log
             tcron1 = time()
             msg = 'Done for %s in %s seconds' % (brain.abspath, tcron1-tcron0)
             log_info(msg, domain='itools.cron')
         # Save changes
         if not error:
             try:
                 database.save_changes()
             except Exception:
                 log_error('Cron error on save changes\n' + format_exc())
                 context.root.alert_on_internal_server_error(context)
         # Message
         t1 = time()
         if not error:
             msg = '[OK] Cron finished for {nb} resources in {s} seconds'.format(nb=nb, s=t1-t0)
         else:
             msg = '[ERROR] Cron finished for {nb} resources in {s} seconds'.format(nb=nb, s=t1-t0)
         log_info(msg, domain='itools.cron')
     # Again, and again
     return self.config.get_value('cron-interval')
Example 13
 def listen(self, address, port):
     # Language negotiation
     init_language_selector(select_language)
     # Say hello
     address = address if address is not None else '*'
     msg = 'Listen %s:%d' % (address, port)
     print msg
     log_info(msg)
     self.port = port
     self.wsgi_server = WSGIServer(
         ('', port), application,
         handler_class=ServerHandler,
         log=self.access_log)
     gevent_signal(SIGTERM, self.stop)
     gevent_signal(SIGINT, self.stop)
     if self.profile:
         runctx("self.wsgi_server.serve_forever()", globals(), locals(), self.profile)
     else:
         self.wsgi_server.serve_forever()
Example 14
class Server(WebServer):
    def __init__(self,
                 target,
                 read_only=False,
                 cache_size=None,
                 profile_space=False):
        target = lfs.get_absolute_path(target)
        self.target = target

        # Load the config
        config = get_config(target)
        self.config = config
        load_modules(config)

        # Contact Email
        self.smtp_from = config.get_value('smtp-from')

        # Full-text indexing
        self.index_text = config.get_value('index-text',
                                           type=Boolean,
                                           default=True)

        # Profile Memory
        if profile_space is True:
            import guppy.heapy.RM

        # The database
        if cache_size is None:
            cache_size = config.get_value('database-size')
        if ':' in cache_size:
            size_min, size_max = cache_size.split(':')
        else:
            size_min = size_max = cache_size
        size_min, size_max = int(size_min), int(size_max)
        read_only = read_only or config.get_value('database-readonly')
        database = get_database(target, size_min, size_max, read_only)
        self.database = database

        # Find out the root class
        root = get_root(database)

        # Initialize
        access_log = '%s/log/access' % target
        super(Server, self).__init__(root, access_log=access_log)

        # Email service
        self.spool = lfs.resolve2(self.target, 'spool')
        spool_failed = '%s/failed' % self.spool
        if not lfs.exists(spool_failed):
            lfs.make_folder(spool_failed)
        # Configuration variables
        get_value = config.get_value
        self.smtp_host = get_value('smtp-host')
        self.smtp_login = get_value('smtp-login', default='').strip()
        self.smtp_password = get_value('smtp-password', default='').strip()
        # Email is sent asynchronously
        self.flush_spool()

        # Logging
        log_file = '%s/log/events' % target
        log_level = config.get_value('log-level')
        if log_level not in log_levels:
            msg = 'configuration error, unexpected "%s" value for log-level'
            raise ValueError(msg % log_level)
        log_level = log_levels[log_level]
        logger = Logger(log_file, log_level, rotate=timedelta(weeks=3))
        register_logger(logger, None)
        logger = WebLogger(log_file, log_level)
        register_logger(logger, 'itools.web')

        # Session timeout
        self.session_timeout = get_value('session-timeout')

    def get_pid(self):
        return get_pid('%s/pid' % self.target)

    def set_context(self, path, context):
        context = super(Server, self).set_context(path, context)
        context.database = self.database

    def listen(self, address, port):
        super(Server, self).listen(address, port)
        # Set ui
        context = StaticContext(local_path=get_abspath('ui'))
        self.set_context('/ui', context)
        for name in skin_registry:
            skin = skin_registry[name]
            context = StaticContext(local_path=skin.key)
            self.set_context('/ui/%s' % name, context)

    def is_running_in_rw_mode(self):
        address = self.config.get_value('listen-address').strip()
        if address == '*':
            address = '127.0.0.1'
        port = self.config.get_value('listen-port')

        url = 'http://%s:%s/;_ctrl' % (address, port)
        try:
            h = vfs.open(url)
        except GError:
            # The server is not running
            return False

        data = h.read()
        return json.loads(data)['read-only'] is False

    #######################################################################
    # Mailing
    #######################################################################
    def get_spool_size(self):
        spool = lfs.open(self.spool)
        # There is always a 'failed' directory, hence the "-1"
        return len(spool.get_names()) - 1

    def save_email(self, message):
        # Check the SMTP host is defined
        if not self.smtp_host:
            raise ValueError('"smtp-host" is not set in config.conf')

        spool = lfs.resolve2(self.target, 'spool')
        tmp_file, tmp_path = mkstemp(dir=spool)
        file = fdopen(tmp_file, 'w')
        try:
            file.write(message.as_string())
        finally:
            file.close()

    def flush_spool(self):
        cron(self._smtp_send, timedelta(seconds=1))

    def send_email(self, message):
        self.save_email(message)
        self.flush_spool()

    def _smtp_send(self):
        nb_max_mails_to_send = 2
        spool = lfs.open(self.spool)

        def get_names():
            # Find out emails to send
            locks = set()
            names = set()
            for name in spool.get_names():
                if name == 'failed':
                    # Skip "failed" special directory
                    continue
                if name[-5:] == '.lock':
                    locks.add(name[:-5])
                else:
                    names.add(name)
            names.difference_update(locks)
            return names

        # Send emails
        names = get_names()
        smtp_host = self.smtp_host
        for name in list(names)[:nb_max_mails_to_send]:
            # 1. Open connection
            try:
                smtp = SMTP(smtp_host)
            except gaierror as excp:
                log_warning('%s: "%s"' % (excp[1], smtp_host))
                return 60  # 1 minute
            except Exception:
                self.smtp_log_error()
                return 60  # 1 minute
            log_info('CONNECTED to %s' % smtp_host)

            # 2. Login
            if self.smtp_login and self.smtp_password:
                smtp.login(self.smtp_login, self.smtp_password)

            # 3. Send message
            try:
                message = spool.open(name).read()
                headers = HeaderParser().parsestr(message)
                subject = headers['subject']
                from_addr = headers['from']
                to_addr = headers['to']
                smtp.sendmail(from_addr, to_addr, message)
                # Remove
                spool.remove(name)
                # Log
                log_msg = 'Email "%s" sent from "%s" to "%s"'
                log_info(log_msg % (subject, from_addr, to_addr))
            except SMTPRecipientsRefused:
                # The recipient addresses have been refused
                self.smtp_log_error()
                spool.move(name, 'failed/%s' % name)
            except SMTPResponseException as excp:
                # The SMTP server returned an error code
                self.smtp_log_error()
                spool.move(name, 'failed/%s_%s' % (excp.smtp_code, name))
Example 15
class Server(WebServer):

    timestamp = None
    port = None
    environment = {}
    modules = []

    def __init__(self, target, read_only=False, cache_size=None,
                 profile_space=False):
        target = lfs.get_absolute_path(target)
        self.target = target
        self.read_only = read_only
        # Set timestamp
        self.timestamp = str(int(time() / 2))
        # Load the config
        config = get_config(target)
        self.config = config
        load_modules(config)
        self.modules = config.get_value('modules')

        # Contact Email
        self.smtp_from = config.get_value('smtp-from')

        # Full-text indexing
        self.index_text = config.get_value('index-text', type=Boolean,
                                           default=True)
        # Accept cors
        self.accept_cors = config.get_value(
            'accept-cors', type=Boolean, default=False)

        # Profile Memory
        if profile_space is True:
            import guppy.heapy.RM

        # The database
        if cache_size is None:
            cache_size = config.get_value('database-size')
        if ':' in cache_size:
            size_min, size_max = cache_size.split(':')
        else:
            size_min = size_max = cache_size
        size_min, size_max = int(size_min), int(size_max)
        read_only = read_only or config.get_value('database-readonly')
        database = get_database(target, size_min, size_max, read_only)
        self.database = database

        # Find out the root class
        root = get_root(database)

        # Load environment file
        root_file_path = inspect.getfile(root.__class__)
        environment_path = str(get_reference(root_file_path).resolve('environment.json'))
        if vfs.exists(environment_path):
            with open(environment_path, 'r') as f:
                data = f.read()
                self.environment = json.loads(data)

        # Init fake context
        context = get_fake_context(database, root.context_cls)
        context.server = self

        # Initialize
        access_log = '%s/log/access' % target
        super(Server, self).__init__(root, access_log=access_log)

        # Email service
        self.spool = lfs.resolve2(self.target, 'spool')
        spool_failed = '%s/failed' % self.spool
        if not lfs.exists(spool_failed):
            lfs.make_folder(spool_failed)
        # Configuration variables
        get_value = config.get_value
        self.smtp_host = get_value('smtp-host')
        self.smtp_login = get_value('smtp-login', default='').strip()
        self.smtp_password = get_value('smtp-password', default='').strip()
        # Email is sent asynchronously
        self.flush_spool()

        # Logging
        log_file = '%s/log/events' % target
        log_level = config.get_value('log-level')
        if log_level not in log_levels:
            msg = 'configuration error, unexpected "%s" value for log-level'
            raise ValueError(msg % log_level)
        log_level = log_levels[log_level]
        logger = Logger(log_file, log_level, rotate=timedelta(weeks=3))
        register_logger(logger, None)
        logger = WebLogger(log_file, log_level)
        register_logger(logger, 'itools.web')
        # Session timeout
        self.session_timeout = get_value('session-timeout')
        # Register routes
        self.register_dispatch_routes()


    def check_consistency(self, quick):
        # Check the server is not running
        pid = get_pid('%s/pid' % self.target)
        if pid is not None:
            print '[%s] The Web Server is already running.' % self.target
            return False

        # Check for database consistency
        if quick is False and check_database(self.target) is False:
            return False

        # Check instance is up to date
        if not is_instance_up_to_date(self.target):
            print 'The instance is not up-to-date, please type:'
            print
            print '    $ icms-update.py %s' % self.target
            print
            return False
        return True


    def start(self, detach=False, profile=False, loop=True):
        profile = ('%s/log/profile' % self.target) if profile else None
        self.loop = ServerLoop(
              target=self.target,
              server=self,
              profile=profile)
        # Daemon mode
        if detach:
            become_daemon()

        # Update Git tree-cache, to speed things up
        self.database.worktree.update_tree_cache()

        # Find out the IP to listen to
        address = self.config.get_value('listen-address').strip()
        if not address:
            raise ValueError('listen-address is missing from config.conf')
        if address == '*':
            address = None

        # Find out the port to listen
        port = self.config.get_value('listen-port')
        if port is None:
            raise ValueError('listen-port is missing from config.conf')

        # Listen & set context
        root = self.root
        self.listen(address, port)

        # Call method on root at start
        context = get_context()
        root.launch_at_start(context)

        # Set cron interval
        interval = self.config.get_value('cron-interval')
        if interval:
            cron(self.cron_manager, interval)

        # Init loop
        if loop:
            try:
                self.loop.run()
            except KeyboardInterrupt:
                self.close()
        # Ok
        return True


    def reindex_catalog(self, quiet=False, quick=False, as_test=False):
        if self.is_running_in_rw_mode():
            print 'Cannot proceed, the server is running in read-write mode.'
            return
        # Check for database consistency
        if quick is False and check_database(self.target) is False:
            return False
        # Create a temporary new catalog
        catalog_path = '%s/catalog.new' % self.target
        if lfs.exists(catalog_path):
            lfs.remove(catalog_path)
        catalog = make_catalog(catalog_path, get_register_fields())

        # Get the root
        root = self.root

        # Build a fake context
        context = self.get_fake_context()

        # Update
        t0, v0 = time(), vmsize()
        doc_n = 0
        error_detected = False
        if as_test:
            log = open('%s/log/update-catalog' % self.target, 'w').write
        for obj in root.traverse_resources():
            if not isinstance(obj, Resource):
                continue
            if not quiet:
                print doc_n, obj.abspath
            doc_n += 1
            context.resource = obj

            # Index the document
            try:
                catalog.index_document(obj)
            except Exception:
                if as_test:
                    error_detected = True
                    log('*** Error detected ***\n')
                    log('Abspath of the resource: %r\n\n' % str(obj.abspath))
                    log(format_exc())
                    log('\n')
                else:
                    raise

            # Free Memory
            del obj
            self.database.make_room()

        if not error_detected:
            if as_test:
                # Delete the empty log file
                remove('%s/log/update-catalog' % self.target)

            # Update / Report
            t1, v1 = time(), vmsize()
            v = (v1 - v0)/1024
            print '[Update] Time: %.02f seconds. Memory: %s Kb' % (t1 - t0, v)
            # Commit
            print '[Commit]',
            sys.stdout.flush()
            catalog.save_changes()
            # Commit / Replace
            old_catalog_path = '%s/catalog' % self.target
            if lfs.exists(old_catalog_path):
                lfs.remove(old_catalog_path)
            lfs.move(catalog_path, old_catalog_path)
            # Commit / Report
            t2, v2 = time(), vmsize()
            v = (v2 - v1)/1024
            print 'Time: %.02f seconds. Memory: %s Kb' % (t2 - t1, v)
            return True
        else:
            print '[Update] Error(s) detected, the new catalog was NOT saved'
            print ('[Update] You can find more info in %r' %
                   join(self.target, 'log/update-catalog'))
            return False


    def get_pid(self):
        return get_pid('%s/pid' % self.target)


    def is_running(self):
        pid = self.get_pid()
        return pid_exists(pid)


    def stop(self, force=False):
        proxy = super(Server, self)
        proxy.stop()
        print 'Stopping server...'
        self.kill(force)


    def kill(self, force=False):
        pid = get_pid('%s/pid' % self.target)
        if pid is None:
            print '[%s] Web Server not running.' % self.target
        else:
            signal = SIGTERM if force else SIGINT
            kill(pid, signal)
            if force:
                print '[%s] Web Server shutting down...' % self.target
            else:
                print '[%s] Web Server shutting down (gracefully)...' % self.target


    def listen(self, address, port):
        super(Server, self).listen(address, port)
        self.port = port



    def save_running_informations(self):
        # Save server running informations
        kw = {'pid': getpid(),
              'target': self.target,
              'read_only': self.read_only}
        data = pickle.dumps(kw)
        with open(self.target + '/running', 'w') as output_file:
            output_file.write(data)


    def get_running_informations(self):
        try:
            with open(self.target + '/running', 'r') as output_file:
                data = output_file.read()
                return pickle.loads(data)
        except IOError:
            return None


    def is_running_in_rw_mode(self, mode='running'):
        is_running = self.is_running()
        if not is_running:
            return False
        if mode == 'request':
            address = self.config.get_value('listen-address').strip()
            if address == '*':
                address = '127.0.0.1'
            port = self.config.get_value('listen-port')

            url = 'http://%s:%s/;_ctrl' % (address, port)
            try:
                h = vfs.open(url)
            except GError:
                # The server is not running
                return False
            data = h.read()
            return json.loads(data)['read-only'] is False
        elif mode == 'running':
            kw = self.get_running_informations()
            return not kw.get('read_only', False)


    #######################################################################
    # Mailing
    #######################################################################
    def get_spool_size(self):
        spool = lfs.open(self.spool)
        # There is always a 'failed' directory, hence the "-1"
        return len(spool.get_names()) - 1


    def save_email(self, message):
        # Check the SMTP host is defined
        if not self.smtp_host:
            raise ValueError('"smtp-host" is not set in config.conf')

        spool = lfs.resolve2(self.target, 'spool')
        tmp_file, tmp_path = mkstemp(dir=spool)
        file = fdopen(tmp_file, 'w')
        try:
            file.write(message.as_string())
        finally:
            file.close()


    def flush_spool(self):
        cron(self._smtp_send, timedelta(seconds=1))


    def send_email(self, message):
        self.save_email(message)
        self.flush_spool()


    def _smtp_send(self):
        nb_max_mails_to_send = 2
        spool = lfs.open(self.spool)

        def get_names():
            # Find out emails to send
            locks = set()
            names = set()
            for name in spool.get_names():
                if name == 'failed':
                    # Skip "failed" special directory
                    continue
                if name[-5:] == '.lock':
                    locks.add(name[:-5])
                else:
                    names.add(name)
            names.difference_update(locks)
            return names

        # Send emails
        names = get_names()
        smtp_host = self.smtp_host
        for name in list(names)[:nb_max_mails_to_send]:
            # 1. Open connection
            try:
                smtp = SMTP(smtp_host)
            except gaierror as excp:
                log_warning('%s: "%s"' % (excp[1], smtp_host))
                return 60 # 1 minute
            except Exception:
                self.smtp_log_error()
                return 60 # 1 minute
            log_info('CONNECTED to %s' % smtp_host)

            # 2. Login
            if self.smtp_login and self.smtp_password:
                smtp.login(self.smtp_login, self.smtp_password)

            # 3. Send message
            try:
                message = spool.open(name).read()
                headers = HeaderParser().parsestr(message)
                subject = headers['subject']
                from_addr = headers['from']
                to_addr = headers['to']
                smtp.sendmail(from_addr, to_addr, message)
                # Remove
                spool.remove(name)
                # Log
                log_msg = 'Email "%s" sent from "%s" to "%s"'
                log_info(log_msg % (subject, from_addr, to_addr))
            except SMTPRecipientsRefused:
                # The recipient addresses have been refused
                self.smtp_log_error()
                spool.move(name, 'failed/%s' % name)
            except SMTPResponseException as excp:
                # The SMTP server returned an error code
                self.smtp_log_error()
                spool.move(name, 'failed/%s_%s' % (excp.smtp_code, name))
Example 16
def stop_server(target):
    msg = 'Stopping server...'
    log_info(msg)
    pid = get_pid('%s/pid' % target)
    if pid:
        kill(pid, SIGTERM)
Example 17
        exit(1)
    config.options = options

    # Case 0: Nothing, print help
    if not args:
        print 'Usage:', usage
        print
        print 'Modules:'
        print
        for name in sorted(modules):
            module = modules[name]
            if module.class_title:
                print u'  %s: %s' % (name, module.class_title)
        exit(0)

    log_info('> Command : ' + ' '.join(args))

    # Get the module
    module_name, args = args[0], args[1:]
    module = modules.get(module_name)
    if not module or not module.class_title:
        print 'Error: unexpected "%s" module' % module_name
        exit(1)

    # Update configuration
    if module_name == 'config':
        config.action_update()
        exit(1)

    # Case 1: Just the module, print help
    if not args:
Example 18
    def reindex_catalog(self, quiet=False, quick=False, as_test=False):
        # FIXME: should be moved into backend
        from itools.database.backends.catalog import make_catalog
        msg = 'reindex catalog %s %s %s' % (quiet, quick, as_test)
        log_info(msg)
        if self.is_running_in_rw_mode():
            print 'Cannot proceed, the server is running in read-write mode.'
            return
        # Create a temporary new catalog
        catalog_path = '%s/catalog.new' % self.target
        if lfs.exists(catalog_path):
            lfs.remove(catalog_path)
        catalog = make_catalog(catalog_path, get_register_fields())
        # Get the root
        root = self.root
        # Update
        t0, v0 = time(), vmsize()
        doc_n = 0
        error_detected = False
        if as_test:
            log = open('%s/log/update-catalog' % self.target, 'w').write
        with self.database.init_context() as context:
            for obj in root.traverse_resources():
                if not quiet or doc_n % 10000 == 0:
                    print('{0} {1}'.format(doc_n, obj.abspath))
                doc_n += 1
                context.resource = obj
                values = obj.get_catalog_values()
                # Index the document
                try:
                    catalog.index_document(values)
                except Exception:
                    if as_test:
                        error_detected = True
                        log('*** Error detected ***\n')
                        log('Abspath of the resource: %r\n\n' % str(obj.abspath))
                        log(format_exc())
                        log('\n')
                    else:
                        raise
                # Free Memory
                del obj
                self.database.make_room()

        if not error_detected:
            if as_test:
                # Delete the empty log file
                remove('%s/log/update-catalog' % self.target)

            # Update / Report
            t1, v1 = time(), vmsize()
            v = (v1 - v0)/1024
            print '[Update] Time: %.02f seconds. Memory: %s Kb' % (t1 - t0, v)
            # Commit
            print '[Commit]',
            sys.stdout.flush()
            catalog.save_changes()
            catalog.close()
            # Commit / Replace
            old_catalog_path = '%s/catalog' % self.target
            if lfs.exists(old_catalog_path):
                lfs.remove(old_catalog_path)
            lfs.move(catalog_path, old_catalog_path)
            # Commit / Report
            t2, v2 = time(), vmsize()
            v = (v2 - v1)/1024
            print 'Time: %.02f seconds. Memory: %s Kb' % (t2 - t1, v)
            return True
        else:
            print '[Update] Error(s) detected, the new catalog was NOT saved'
            print ('[Update] You can find more info in %r' %
                   join(self.target, 'log/update-catalog'))
            return False
Example 19
 def close(self):
     log_info('Close server')
     self.database.close()