Example #1
    def get_pg_environment(self):
        if not self.is_using_postgresql():
            logger.warn("Only PostgreSQL databases are supported right now.")
        # rip additional :port from hostName, but allow occurrence of plain
        # ipv6 address between []-brackets (simply assume [ipv6::] when ']' is
        # found in string (also see JDBCDataStoreConfiguration in MxRuntime)
        host = self._conf['mxruntime']['DatabaseHost']
        port = "5432"
        ipv6end = host.rfind(']')
        lastcolon = host.rfind(':')
        if ipv6end != -1 and lastcolon > ipv6end:
            # "]" found and ":" exists after the "]"
            port = host[lastcolon + 1:]
            host = host[1:ipv6end]
        elif ipv6end != -1:
            # "]" found but no ":" exists after the "]"
            host = host[1:ipv6end]
        elif ipv6end == -1 and lastcolon != -1:
            # no "]" found and ":" exists, simply split on ":"
            port = host[lastcolon + 1:]
            host = host[:lastcolon]

        # TODO: sanity checks
        pg_env = {
            'PGHOST': host,
            'PGPORT': port,
            'PGUSER': self._conf['mxruntime']['DatabaseUserName'],
            'PGPASSWORD': self._conf['mxruntime']['DatabasePassword'],
            'PGDATABASE': self._conf['mxruntime']['DatabaseName'],
        }
        logger.trace("PostgreSQL environment variables: %s" % str(pg_env))
        return pg_env
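The host/port parsing above covers three shapes of DatabaseHost: a bracketed IPv6 address with a trailing :port, a bracketed IPv6 address without one, and a plain host:port pair. A standalone sketch of the same scanning logic (the function name and sample values are illustrative, not from the source):

def split_host_port(host, default_port="5432"):
    # mirrors the rfind-based bracket/colon scan in get_pg_environment
    port = default_port
    ipv6end = host.rfind(']')
    lastcolon = host.rfind(':')
    if ipv6end != -1 and lastcolon > ipv6end:
        port = host[lastcolon + 1:]
        host = host[1:ipv6end]
    elif ipv6end != -1:
        host = host[1:ipv6end]
    elif lastcolon != -1:
        port = host[lastcolon + 1:]
        host = host[:lastcolon]
    return host, port

print(split_host_port("[::1]:5433"))   # ('::1', '5433')
print(split_host_port("db.internal"))  # ('db.internal', '5432')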
Example #2
def complete_unpack(model_upload_path, text):
    logger.trace("complete_unpack: Looking for %s in %s" %
                 (text, model_upload_path))
    return [f for f in os.listdir(model_upload_path)
            if os.path.isfile(os.path.join(model_upload_path, f))
            and f.startswith(text)
            and (f.endswith(".zip") or f.endswith(".mda"))]
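A quick way to exercise complete_unpack against a throwaway directory (the file names are made up for illustration; complete_unpack and a trace-capable logger are assumed to be in scope):

import os
import tempfile

upload_path = tempfile.mkdtemp()
for name in ('app-1.0.mda', 'app-2.0.zip', 'readme.txt'):
    open(os.path.join(upload_path, name), 'w').close()

print(complete_unpack(upload_path, 'app'))
# expected: ['app-1.0.mda', 'app-2.0.zip'], in directory listing order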
Example #3
def restoredb(config, dump_name):

    if not config.allow_destroy_db():
        logger.error(
            "Refusing to do a destructive database operation "
            "because the allow_destroy_db configuration option "
            "is set to false."
        )
        return False

    env = os.environ.copy()
    env.update(config.get_pg_environment())

    db_dump_file_name = os.path.join(config.get_database_dump_path(), dump_name)
    if not os.path.isfile(db_dump_file_name):
        logger.error("file %s does not exist: " % db_dump_file_name)
        return False

    logger.debug("Restoring %s" % db_dump_file_name)
    cmd = (config.get_pg_restore_binary(), "-d", env["PGDATABASE"], "-O", "-n", "public", "-x", db_dump_file_name)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()

    if stderr != "":
        logger.error("An error occured while calling pg_restore: %s " % stderr)
        return False

    return True
Example #4
def restoredb(config, dump_name):

    if not config.allow_destroy_db():
        logger.error("Refusing to do a destructive database operation "
                     "because the allow_destroy_db configuration option "
                     "is set to false.")
        return False

    env = os.environ.copy()
    env.update(config.get_pg_environment())

    db_dump_file_name = os.path.join(config.get_database_dump_path(),
                                     dump_name)
    if not os.path.isfile(db_dump_file_name):
        logger.error("file %s does not exist: " % db_dump_file_name)
        return False

    logger.debug("Restoring %s" % db_dump_file_name)
    cmd = (config.get_pg_restore_binary(), "-d", env['PGDATABASE'], "-O", "-n",
           "public", "-x", db_dump_file_name)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd,
                            env=env,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()

    if stderr != '':
        logger.error("An error occured while calling pg_restore: %s " % stderr)
        return False

    return True
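Both restoredb variants above treat any stderr output as fatal, but pg_restore can write warnings to stderr and still exit 0. A more conservative check keys on the exit status and keeps stderr for diagnostics (a sketch for the tail of restoredb, assuming the same cmd and env as above):

    proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()
    if proc.returncode != 0:
        logger.error("pg_restore exited with status %d: %s" %
                     (proc.returncode, stderr))
        return False
    return True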
Example #5
    def simplify(self):
        """ Remove empty blocks and edges"""
        # remove blocks with no statement
        for block in self.blocks():
            for edge in block.succ:
                while (not edge.tail.statements or all(
                        isinstance(s, Nop)
                        for s in edge.tail.statements)) and len(
                            edge.tail.succ) == 1:
                    logger.trace('remove block {}'.format(edge.tail))
                    empty = edge.tail
                    edge.tail = edge.tail.succ[0].tail
                    del empty

        self.update_pred()
        # remove single in single out edge
        for block in self.blocks():
            while len(block.succ
                      ) == 1 and block.succ[0].tail != self.root and len(
                          self.pred[block.succ[0].tail]) == 1:
                merged = block.succ[0].tail
                logger.debug('merge edge {} {}'.format(block, block.succ[0]))
                block.statements += merged.statements
                block.succ = merged.succ
                del merged
        self.update_pred()
Example #6
    def get_java_cmd(self):
        """
        Build complete JVM startup command line
        """
        cmd = ['java']
        if 'javaopts' in self._conf['m2ee']:
            if isinstance(self._conf['m2ee']['javaopts'], list):
                cmd.extend(self._conf['m2ee']['javaopts'])
            else:
                logger.warn("javaopts option in m2ee section in configuration "
                            "is not a list")
        if self._classpath:
            cmd.extend(['-cp', self._classpath])

            if self.runtime_version >= 5:
                cmd.append('-Dfelix.config.properties=file:%s' % self.get_felix_config_file())

            cmd.append(self._get_appcontainer_mainclass())
        elif self._appcontainer_version:
            cmd.extend(['-jar', self._appcontainer_jar])
        else:
            logger.critical("Unable to determine JVM startup parameters.")
            return None

        logger.trace("Command line to be used when starting the JVM: %s" %
                     ' '.join(cmd))
        return cmd
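For a Mendix 5 setup with a javaopts list and a computed classpath, the assembled command line resembles the following (all paths and values are illustrative, not taken from the source):

# ['java', '-Xmx512M',
#  '-cp', '/srv/app/runtime/felix/bin/felix.jar',
#  '-Dfelix.config.properties=file:/srv/app/.m2ee/felix.properties',
#  '<appcontainer main class>']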
Example #7
def restoredb(pg_env, pg_restore_binary, database_dump_path, dump_name):

    env = os.environ.copy()
    env.update(pg_env)

    answer = raw_input("This command will restore this dump into database %s. "
                       "Continue? (y)es, (N)o? " % env['PGDATABASE'])
    if answer != 'y':
        logger.info("Aborting!")
        return

    db_dump_file_name = os.path.join(database_dump_path, dump_name)
    if not os.path.isfile(db_dump_file_name):
        logger.error("file %s does not exist: " % db_dump_file_name)
        return

    logger.debug("Restoring %s" % db_dump_file_name)
    cmd = (pg_restore_binary, "-d", env['PGDATABASE'], "-O", "-x",
           db_dump_file_name)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()

    if stderr != '':
        logger.error("An error occured while calling pg_restore: %s " % stderr)
        return
Example #8
    def get_pg_environment(self):
        if not self.is_using_postgresql():
            logger.warn("Only PostgreSQL databases are supported right now.")
        # rip additional :port from hostName, but allow occurrence of plain
        # ipv6 address between []-brackets (simply assume [ipv6::] when ']' is
        # found in string (also see JDBCDataStoreConfiguration in MxRuntime)
        host = self._conf['mxruntime']['DatabaseHost']
        port = "5432"
        ipv6end = host.rfind(']')
        lastcolon = host.rfind(':')
        if ipv6end != -1 and lastcolon > ipv6end:
            # "]" found and ":" exists after the "]"
            port = host[lastcolon + 1:]
            host = host[1:ipv6end]
        elif ipv6end != -1:
            # "]" found but no ":" exists after the "]"
            host = host[1:ipv6end]
        elif ipv6end == -1 and lastcolon != -1:
            # no "]" found and ":" exists, simply split on ":"
            port = host[lastcolon + 1:]
            host = host[:lastcolon]

        # TODO: sanity checks
        pg_env = {
            'PGHOST': host,
            'PGPORT': port,
            'PGUSER': self._conf['mxruntime']['DatabaseUserName'],
            'PGPASSWORD': self._conf['mxruntime']['DatabasePassword'],
            'PGDATABASE': self._conf['mxruntime']['DatabaseName'],
        }
        logger.trace("PostgreSQL environment variables: %s" % str(pg_env))
        return pg_env
Example #9
    def start(self, timeout=60, step=0.25):
        if self.check_pid():
            logger.error("The application process is already started!")
            return False

        cmd = self._config.get_java_cmd()
        env = self._config.get_java_env()

        try:
            logger.trace("[%s] Forking now..." % os.getpid())
            pid = os.fork()
            if pid > 0:
                self._pid = None
                logger.trace("[%s] Waiting for intermediate process to "
                             "exit..." % os.getpid())
                # prevent zombie process
                (waitpid, result) = os.waitpid(pid, 0)
                if result == 0:
                    logger.debug("The JVM process has been started.")
                    return True
                logger.error("Starting the JVM process did not succeed...")
                return False
        except OSError, e:
            logger.error("Forking subprocess failed: %d (%s)\n" %
                         (e.errno, e.strerror))
            return
Example #10
    def start(self, timeout=60, step=0.25):
        if self.check_pid():
            logger.error("The application process is already started!")
            return False

        env = self._config.get_java_env()
        cmd = self._config.get_java_cmd()

        try:
            logger.trace("[%s] Forking now..." % os.getpid())
            pid = os.fork()
            if pid > 0:
                self._pid = None
                logger.trace("[%s] Waiting for intermediate process to "
                             "exit..." % os.getpid())
                # prevent zombie process
                (waitpid, result) = os.waitpid(pid, 0)
                if result == 0:
                    logger.debug("The JVM process has been started.")
                    return True
                logger.error("Starting the JVM process did not succeed...")
                return False
        except OSError, e:
            logger.error("Forking subprocess failed: %d (%s)\n" %
                         (e.errno, e.strerror))
            return
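The pid > 0 branch above runs in the original process: it reaps the intermediate child with os.waitpid to prevent a zombie and treats exit status 0 as success. The generic double-fork pattern this follows, as a self-contained sketch (the project's own continuation, shown in Examples #31 and #33 below, uses subprocess.Popen for the second step):

import os

pid = os.fork()
if pid > 0:
    # original process: reap the intermediate child to avoid a zombie
    os.waitpid(pid, 0)
else:
    # intermediate process: start a new session, detach from the terminal
    os.setsid()
    if os.fork() > 0:
        os._exit(0)  # intermediate exits; its parent's waitpid returns
    # grandchild: fully detached; exec or spawn the long-running process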
Example #11
 def ping(self, timeout=5):
     try:
         response = self.request("echo", {"echo": "ping"}, timeout)
         if response.get_result() == 0:
             return True
     except AttributeError, e:
         # httplib 0.6 throws AttributeError: 'NoneType' object has no
         # attribute 'makefile' in case of a connection refused :-|
         logger.trace("Got %s: %s" % (type(e), e))
Example #12
 def ping(self, timeout=5):
     try:
         response = self.request("echo", {"echo": "ping"}, timeout)
         if response.get_result() == 0:
             return True
     except AttributeError, e:
         # httplib 0.6 throws AttributeError: 'NoneType' object has no
         # attribute 'makefile' in case of a connection refused :-|
         logger.trace("Got %s: %s" % (type(e), e))
Example #13
    def send_runtime_config(self, database_password=None):
        # send runtime configuration
        # catch and report:
        # - configuration errors (X is not a file etc)
        # XXX: fix mxruntime to report all errors and warnings in adminaction
        # feedback instead of stopping to process input
        # if errors, abort.

        config = copy.deepcopy(self.config.get_runtime_config())
        if database_password:
            config['DatabasePassword'] = database_password

        custom_config_25 = None
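        # '//' is overloaded on the runtime version object as a
        # branch-match check (is this a 2.5.x runtime?), not division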
        if self.config.get_runtime_version() // '2.5':
            custom_config_25 = config.pop('MicroflowConstants', None)

        # convert MyScheduledEvents from list to dumb comma separated string if
        # needed:
        if isinstance(config.get('MyScheduledEvents', None), list):
            logger.trace("Converting mxruntime MyScheduledEvents from list to "
                         "comma separated string...")
            config['MyScheduledEvents'] = ','.join(config['MyScheduledEvents'])

        # convert certificate options from list to dumb comma separated string
        # if needed:
        for option in ('CACertificates', 'ClientCertificates',
                       'ClientCertificatePasswords'):
            if isinstance(config.get(option, None), list):
                logger.trace("Converting mxruntime %s from list to comma "
                             "separated string..." % option)
                config[option] = ','.join(config[option])

        logger.debug("Sending MxRuntime configuration...")
        logger.debug(str(config))
        m2eeresponse = self.client.update_configuration(config)
        result = m2eeresponse.get_result()
        if result == 1:
            logger.error("Sending configuration failed: %s" %
                         m2eeresponse.get_cause())
            logger.error("You'll have to fix the configuration and run start "
                         "again...")
            return False

        # if running 2.5.x we send the MicroflowConstants via
        # update_custom_configuration
        if custom_config_25:
            logger.debug("Sending 2.5.x custom configuration...")
            m2eeresponse = self.client.update_custom_configuration(
                custom_config_25)
            result = m2eeresponse.get_result()
            if result == 1:
                logger.error("Sending custom configuration failed: %s" %
                             m2eeresponse.get_cause())
                return False

        return True
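The MyScheduledEvents conversion above just normalizes a yaml list into the comma-separated string form the runtime expects; in isolation:

config = {'MyScheduledEvents': ['Module.Event1', 'Module.Event2']}
if isinstance(config.get('MyScheduledEvents'), list):
    config['MyScheduledEvents'] = ','.join(config['MyScheduledEvents'])
print(config['MyScheduledEvents'])  # 'Module.Event1,Module.Event2'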
Example #14
    def send_runtime_config(self, database_password=None):
        # send runtime configuration
        # catch and report:
        # - configuration errors (X is not a file etc)
        # XXX: fix mxruntime to report all errors and warnings in adminaction
        # feedback instead of stopping to process input
        # if errors, abort.

        config = copy.deepcopy(self.config.get_runtime_config())
        if database_password:
            config['DatabasePassword'] = database_password

        custom_config_25 = None
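        # '//' is overloaded on the runtime version object as a
        # branch-match check (is this a 2.5.x runtime?), not division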
        if self.config.get_runtime_version() // '2.5':
            custom_config_25 = config.pop('MicroflowConstants', None)

        # convert MyScheduledEvents from list to dumb comma separated string if
        # needed:
        if isinstance(config.get('MyScheduledEvents', None), list):
            logger.trace("Converting mxruntime MyScheduledEvents from list to "
                         "comma separated string...")
            config['MyScheduledEvents'] = ','.join(config['MyScheduledEvents'])

        # convert certificate options from list to dumb comma separated string
        # if needed:
        for option in ('CACertificates', 'ClientCertificates',
                       'ClientCertificatePasswords'):
            if isinstance(config.get(option, None), list):
                logger.trace("Converting mxruntime %s from list to comma "
                             "separated string..." % option)
                config[option] = ','.join(config[option])

        logger.debug("Sending MxRuntime configuration...")
        m2eeresponse = self.client.update_configuration(config)
        result = m2eeresponse.get_result()
        if result == 1:
            logger.error("Sending configuration failed: %s" %
                         m2eeresponse.get_cause())
            logger.error("You'll have to fix the configuration and run start "
                         "again...")
            return False

        # if running 2.5.x we send the MicroflowConstants via
        # update_custom_configuration
        if custom_config_25:
            logger.debug("Sending 2.5.x custom configuration...")
            m2eeresponse = self.client.update_custom_configuration(
                custom_config_25)
            result = m2eeresponse.get_result()
            if result == 1:
                logger.error("Sending custom configuration failed: %s" %
                             m2eeresponse.get_cause())
                return False

        return True
Example #15
 def check_pid(self, pid=None):
     if pid is None:
         pid = self.get_pid()
     if not pid:
         return False
     try:
         os.kill(pid, 0)  # doesn't actually kill process
         return True
     except OSError:
         logger.trace("No process with pid %s, or not ours." % pid)
         return False
Example #16
 def check_pid(self, pid=None):
     if pid is None:
         pid = self.get_pid()
     if not pid:
         return False
     try:
         os.kill(pid, 0)  # doesn't actually kill process
         return True
     except OSError:
         logger.trace("No process with pid %s, or not ours." % pid)
         return False
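os.kill(pid, 0) delivers no signal; the kernel only runs its existence and permission checks, which is why the comment says it doesn't actually kill anything. A sketch that distinguishes the two failure modes via errno (POSIX semantics; the helper name is illustrative):

import errno
import os

def probe_pid(pid):
    try:
        os.kill(pid, 0)
        return "alive and ours"
    except OSError as e:
        if e.errno == errno.EPERM:
            return "alive, but owned by another user"
        return "no such process"  # ESRCH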
Example #17
    def get_java_env(self):
        env = {}

        preserve_environment = self._conf['m2ee'].get('preserve_environment',
                                                      False)
        if preserve_environment is True:
            env = os.environ.copy()
        elif preserve_environment is False:
            pass
        elif type(preserve_environment) == list:
            for varname in preserve_environment:
                if varname in os.environ:
                    env[varname] = os.environ[varname]
                else:
                    logger.warn("preserve_environment variable %s is not "
                                "present in os.environ" % varname)
        else:
            logger.warn("preserve_environment is not a boolean or list")

        custom_environment = self._conf['m2ee'].get('custom_environment', {})
        if custom_environment is not None:
            if type(custom_environment) == dict:
                env.update(custom_environment)
            else:
                logger.warn("custom_environment option in m2ee section in "
                            "configuration is not a dictionary")

        env.update({
            'M2EE_ADMIN_PORT': str(self._conf['m2ee']['admin_port']),
            'M2EE_ADMIN_PASS': str(self._conf['m2ee']['admin_pass']),
            # only has effect with Mendix >= 4.3, but include anyway as
            # it does not break earlier versions
            'M2EE_ADMIN_LISTEN_ADDRESSES': str(
                self._conf['m2ee']['admin_listen_addresses']),
            'M2EE_RUNTIME_LISTEN_ADDRESSES': str(
                self._conf['m2ee']['runtime_listen_addresses']),
        })

        # only add RUNTIME environment variables when using default
        # appcontainer from runtime distro
        if not self._appcontainer_version and self.runtime_version < 5:
            env['M2EE_RUNTIME_PORT'] = str(self._conf['m2ee']['runtime_port'])
            if 'runtime_blocking_connector' in self._conf['m2ee']:
                env['M2EE_RUNTIME_BLOCKING_CONNECTOR'] = str(
                    self._conf['m2ee']['runtime_blocking_connector'])

        if 'monitoring_pass' in self._conf['m2ee']:
            env['M2EE_MONITORING_PASS'] = str(
                self._conf['m2ee']['monitoring_pass'])

        logger.trace("Environment to be used when starting the JVM: %s" % env)
        return env
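Because each env.update() call runs later than the previous one, the effective precedence is: preserved os.environ entries, then custom_environment, then the M2EE_* variables. The same rule with plain dicts (values illustrative):

env = {}
env.update({'PATH': '/usr/bin'})          # preserved from os.environ
env.update({'PATH': '/opt/custom/bin'})   # custom_environment overrides it
env.update({'M2EE_ADMIN_PORT': '7000'})   # M2EE_* settings are added last
print(env['PATH'])  # '/opt/custom/bin'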
Example #18
 def request(self, action, params=None, timeout=None):
     body = {"action": action}
     if params:
         body["params"] = params
     body = json.dumps(body)
     h = httplib2.Http(timeout=timeout)  # httplib does not like os.fork
     logger.trace("M2EE request body: %s" % body)
     (response_headers, response_body) = h.request(self._url, "POST", body, headers=self._headers)
     if response_headers["status"] == "200":
         logger.trace("M2EE response: %s" % response_body)
         return M2EEResponse(action, json.loads(response_body))
     else:
         logger.error("non-200 http status code: %s %s" % (response_headers, response_body))
Example #19
def dumpdb(pg_env, pg_dump_binary, database_dump_path):

    env = os.environ.copy()
    env.update(pg_env)

    db_dump_file_name = os.path.join(database_dump_path,
                                     "%s_%s.backup" %
                                     (env['PGDATABASE'],
                                         time.strftime("%Y%m%d_%H%M%S")))

    logger.info("Writing database dump to %s" % db_dump_file_name)
    cmd = (pg_dump_binary, "-O", "-x", "-F", "c")
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env, stdout=open(db_dump_file_name, 'w+'))
    proc.communicate()
Example #20
 def request(self, action, params=None, timeout=None):
     body = {"action": action}
     if params:
         body["params"] = params
     body = json.dumps(body)
     h = httplib2.Http(timeout=timeout, proxy_info=None)  # httplib does not like os.fork
     logger.trace("M2EE request body: %s" % body)
     (response_headers, response_body) = h.request(self._url, "POST", body,
                                                   headers=self._headers)
     if (response_headers['status'] == "200"):
         logger.trace("M2EE response: %s" % response_body)
         return M2EEResponse(action, json.loads(response_body))
     else:
         logger.error("non-200 http status code: %s %s" %
                      (response_headers, response_body))
Example #21
def dumpdb(config, name=None):

    env = os.environ.copy()
    env.update(config.get_pg_environment())

    if name is None:
        name = "%s_%s.backup" % (env["PGDATABASE"], time.strftime("%Y%m%d_%H%M%S"))

    db_dump_file_name = os.path.join(config.get_database_dump_path(), name)

    logger.info("Writing database dump to %s" % db_dump_file_name)
    cmd = (config.get_pg_dump_binary(), "-O", "-x", "-F", "c")
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env, stdout=open(db_dump_file_name, "w+"))
    proc.communicate()
Example #22
 def _wait_pid(self, timeout=None, step=0.25):
     logger.trace("Waiting for process to disappear: timeout=%s" % timeout)
     if self.check_pid():
         if timeout is None:
             return False
         t = 0
         while t < timeout:
             sleep(step)
             if not self.check_pid():
                 break
             t += step
         if t >= timeout:
             logger.trace("Timeout: Process %s takes too long to " "disappear." % self._pid)
             return False
     self.cleanup_pid()
     return True
Example #23
def dumpdb(config, name=None):

    env = os.environ.copy()
    env.update(config.get_pg_environment())

    if name is None:
        name = ("%s_%s.backup" %
                (env['PGDATABASE'], time.strftime("%Y%m%d_%H%M%S")))

    db_dump_file_name = os.path.join(config.get_database_dump_path(), name)

    logger.info("Writing database dump to %s" % db_dump_file_name)
    cmd = (config.get_pg_dump_binary(), "-O", "-x", "-F", "c")
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd, env=env, stdout=open(db_dump_file_name, 'w+'))
    proc.communicate()
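The dumpdb variants above hand an open file object to Popen without closing it and never check the exit status. A tightened sketch (same cmd, env and db_dump_file_name assumed):

with open(db_dump_file_name, 'w+') as dump_file:
    proc = subprocess.Popen(cmd, env=env, stdout=dump_file)
    proc.communicate()
if proc.returncode != 0:
    logger.error("pg_dump exited with status %d" % proc.returncode)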
Example #24
 def _wait_pid(self, timeout=None, step=0.25):
     logger.trace("Waiting for process to disappear: timeout=%s" % timeout)
     if self.check_pid():
         if timeout is None:
             return False
         t = 0
         while t < timeout:
             sleep(step)
             if not self.check_pid():
                 break
             t += step
         if t >= timeout:
             logger.trace("Timeout: Process %s takes too long to "
                          "disappear." % self._pid)
             return False
     self.cleanup_pid()
     return True
Example #25
 def request(self, action, params=None, timeout=None):
     body = {"action": action}
     if params:
         body["params"] = params
     body = json.dumps(body)
     # there are no parallel requests done, so we mess with socket timeout
     # right before the request
     socket.setdefaulttimeout(timeout)
     h = httplib2.Http()  # httplib does not like os.fork
     logger.trace("M2EE request body: %s" % body)
     (response_headers, response_body) = h.request(self._url, "POST", body,
                                                   headers=self._headers)
     if (response_headers['status'] == "200"):
         logger.trace("M2EE response: %s" % response_body)
         return M2EEResponse(action, json.loads(response_body))
     else:
         logger.error("non-200 http status code: %s %s" %
                      (response_headers, response_body))
Example #26
def check_download_runtime_existence(url):
    h = httplib2.Http(timeout=10)
    logger.debug("Checking for existence of %s via HTTP HEAD" % url)
    try:
        (response_headers, response_body) = h.request(url, "HEAD")
        logger.trace("Response headers: %s" % response_headers)
    except (httplib2.HttpLib2Error, httplib.HTTPException,
            socket.error) as e:
        logger.error("Checking download url %s failed: %s: %s"
                     % (url, e.__class__.__name__, e))
        return False

    if (response_headers['status'] == '200'):
        logger.debug("Ok, got HTTP 200")
        return True
    if (response_headers['status'] == '404'):
        logger.error("The location %s cannot be found." % url)
        return False
    logger.error("Checking download url %s failed, HTTP status code %s"
                 % (url, response_headers['status']))
    return False
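Usage is a single call; the URL below is hypothetical:

if check_download_runtime_existence(
        'https://download.example.org/runtime/mendix-5.0.0.tar.gz'):
    logger.info("Runtime download is available.")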
Example #27
def unpack(config, mda_name):

    mda_file_name = os.path.join(config.get_model_upload_path(), mda_name)
    if not os.path.isfile(mda_file_name):
        logger.error("file %s does not exist" % mda_file_name)
        return False

    logger.debug("Testing archive...")
    cmd = ("unzip", "-tqq", mda_file_name)
    logger.trace("Executing %s" % str(cmd))
    try:
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()

        if proc.returncode != 0:
            logger.error("An error occured while testing archive "
                         "consistency:")
            logger.error("stdout: %s" % stdout)
            logger.error("stderr: %s" % stderr)
            return False
        else:
            logger.trace("stdout: %s" % stdout)
            logger.trace("stderr: %s" % stderr)
    except OSError, ose:
        import errno
        if ose.errno == errno.ENOENT:
            logger.error("The unzip program could not be found: %s" %
                         ose.strerror)
        else:
            logger.error("An error occured while executing unzip: %s" % ose)
        return False
Example #28
def fix_mxclientsystem_symlink(config):
    logger.debug("Running fix_mxclientsystem_symlink...")
    mxclient_symlink = os.path.join(
        config.get_public_webroot_path(), 'mxclientsystem')
    logger.trace("mxclient_symlink: %s" % mxclient_symlink)
    real_mxclientsystem_path = config.get_real_mxclientsystem_path()
    logger.trace("real_mxclientsystem_path: %s" % real_mxclientsystem_path)
    if os.path.islink(mxclient_symlink):
        current_real_mxclientsystem_path = os.path.realpath(
            mxclient_symlink)
        if current_real_mxclientsystem_path != real_mxclientsystem_path:
            logger.debug("mxclientsystem symlink exists, but points "
                         "to %s" % current_real_mxclientsystem_path)
            logger.debug("redirecting symlink to %s" %
                         real_mxclientsystem_path)
            os.unlink(mxclient_symlink)
            os.symlink(real_mxclientsystem_path, mxclient_symlink)
    elif not os.path.exists(mxclient_symlink):
        logger.debug("creating mxclientsystem symlink pointing to %s" %
                     real_mxclientsystem_path)
        try:
            os.symlink(real_mxclientsystem_path, mxclient_symlink)
        except OSError, e:
            logger.error("creating symlink failed: %s" % e)
Example #29
class M2EEClient:
    def __init__(self, url, password):
        self._url = url
        self._headers = {
            'Content-Type': 'application/json',
            'X-M2EE-Authentication': b64encode(password)
        }

    def request(self, action, params=None, timeout=None):
        body = {"action": action}
        if params:
            body["params"] = params
        body = json.dumps(body)
        h = httplib2.Http(timeout=timeout,
                          proxy_info=None)  # httplib does not like os.fork
        logger.trace("M2EE request body: %s" % body)
        (response_headers, response_body) = h.request(self._url,
                                                      "POST",
                                                      body,
                                                      headers=self._headers)
        if (response_headers['status'] == "200"):
            logger.trace("M2EE response: %s" % response_body)
            return M2EEResponse(action, json.loads(response_body))
        else:
            logger.error("non-200 http status code: %s %s" %
                         (response_headers, response_body))

    def ping(self, timeout=5):
        try:
            response = self.request("echo", {"echo": "ping"}, timeout)
            if response.get_result() == 0:
                return True
        except AttributeError, e:
            # httplib 0.6 throws AttributeError: 'NoneType' object has no
            # attribute 'makefile' in case of a connection refused :-|
            logger.trace("Got %s: %s" % (type(e), e))
        except (socket.error, socket.timeout), e:
            logger.trace("Got %s: %s" % (type(e), e))
            logger.error("Got %s: %s" % (type(e), e))
            import traceback
            logger.error(traceback.format_exc())
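A minimal usage sketch for the class above (address and password are illustrative; M2EEResponse and the module-level logger are assumed to be importable):

client = M2EEClient('http://127.0.0.1:7120', 's3cr3t')
if client.ping():
    response = client.request('echo', {'echo': 'hello'})
    print(response.get_result())  # 0 on success, as in ping() above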
Example #30
    def collapse_condition(self, root):
        if len(root.succ) == 2 and isinstance(root.statements[-1], Decision):
            true: Block = root.find_succ(True)
            false: Block = root.find_succ(False)
            if isinstance(false.statements[-1], Decision) and len(
                    self.pred[false]) == 1 and not isinstance(
                        false.statements[0], LoopBody):
                if false.find_succ(True) is true:
                    logger.trace('{} R or F -> T, Ff'.format(root))
                    return self.merge_decision, (root, false, 'or', [
                        Edge(true, True),
                        Edge(false.find_succ(False), False)
                    ])

                if false.find_succ(False) is true:
                    logger.trace('{} not R and F -> Ft, T'.format(root))
                    return self.merge_decision, (root, false, 'and', [
                        Edge(false.find_succ(True), True),
                        Edge(true, False)
                    ], True)

            if isinstance(true.statements[-1], Decision) and len(
                    self.pred[true]) == 1 and not isinstance(
                        true.statements[0], LoopBody):
                if true.find_succ(True) is false:
                    logger.trace('{} not R or T -> F, Tf'.format(root))
                    return self.merge_decision, (root, true, 'or', [
                        Edge(false, True),
                        Edge(true.find_succ(False), False)
                    ], True)

                if true.find_succ(False) is false:
                    logger.trace('{} R and T -> Tt, F'.format(root))
                    return self.merge_decision, (root, true, 'and', [
                        Edge(true.find_succ(True), True),
                        Edge(false, False)
                    ])
Example #31
     if pid > 0:
         self._pid = None
         logger.trace("[%s] Waiting for intermediate process to "
                      "exit..." % os.getpid())
         # prevent zombie process
         (waitpid, result) = os.waitpid(pid, 0)
         if result == 0:
             logger.debug("The JVM process has been started.")
             return True
         logger.error("Starting the JVM process did not succeed...")
         return False
 except OSError, e:
     logger.error("Forking subprocess failed: %d (%s)\n" %
                  (e.errno, e.strerror))
     return
 logger.trace("[%s] Now in intermediate forked process..." %
              os.getpid())
 # decouple from parent environment
 os.chdir("/")
 os.setsid()
 os.umask(0022)
 # start java subprocess (second fork)
 logger.trace("[%s] Starting the JVM..." % os.getpid())
 try:
     proc = subprocess.Popen(
         cmd,
         close_fds=True,
         cwd='/',
         env=env,
     )
 except OSError, ose:
     if ose.errno == errno.ENOENT:
Example #32
    def _merge_runtime_configuration(self):
        logger.debug("Merging runtime configuration...")

        config_json = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_file = os.path.join(self._conf['m2ee']['app_base'],
                                            'model',
                                            'config.json'
                                            )
            logger.trace("In DTAPMode %s, so loading configuration from %s" %
                         (self.get_dtap_mode(), config_json_file)
                         )
            config_json = self._try_load_json(config_json_file)

        # figure out which constants to use
        merge_constants = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_constants = config_json.get('Constants', {})
            logger.trace("In DTAPMode %s, so using Constants from "
                         "config.json: %s" %
                         (self.get_dtap_mode(), config_json_constants))
            merge_constants.update(config_json_constants)
        # custom yaml section can override defaults
        yaml_custom = self._conf.get('custom', {})
        if yaml_custom:
            logger.trace("Using constants from custom config section: %s" %
                         yaml_custom)
            merge_constants.update(yaml_custom)
        # 'MicroflowConstants' from runtime yaml section can override
        # default/custom
        yaml_mxruntime_mfconstants = (
            self._conf['mxruntime'].get('MicroflowConstants', {}))
        if yaml_mxruntime_mfconstants:
            logger.trace("Using constants from mxruntime/MicroflowConstants: "
                         "%s" % yaml_mxruntime_mfconstants)
            merge_constants.update(yaml_mxruntime_mfconstants)
        # merge all yaml runtime settings into config
        merge_config = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_configuration = config_json.get('Configuration', {})
            logger.trace("In DTAPMode %s, so seeding runtime configuration "
                         "with Configuration from config.json: %s" %
                         (self.get_dtap_mode(), config_json_configuration))
            merge_config.update(config_json_configuration)
        merge_config.update(self._conf['mxruntime'])
        logger.trace("Merging current mxruntime config into it... %s" %
                     self._conf['mxruntime'])
        # replace 'MicroflowConstants' with mfconstants we just figured out
        # before to prevent dict-deepmerge-problems
        merge_config['MicroflowConstants'] = merge_constants
        logger.trace("Replacing 'MicroflowConstants' with constants we just "
                     "figured out: %s" % merge_constants)
        # the merged result will be put back into self._conf['mxruntime']
        logger.debug("Merged runtime configuration: %s" % merge_config)
        return merge_config
Example #33
            if pid > 0:
                self._pid = None
                logger.trace("[%s] Waiting for intermediate process to "
                             "exit..." % os.getpid())
                # prevent zombie process
                (waitpid, result) = os.waitpid(pid, 0)
                if result == 0:
                    logger.debug("The JVM process has been started.")
                    return True
                logger.error("Starting the JVM process did not succeed...")
                return False
        except OSError, e:
            logger.error("Forking subprocess failed: %d (%s)\n" %
                         (e.errno, e.strerror))
            return
        logger.trace("[%s] Now in intermediate forked process..." %
                     os.getpid())
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0022)

        logger.debug("Environment to be used when starting the JVM: %s" %
                     ' '.join(["%s='%s'" % (k, v)
                               for k, v in env.iteritems()]))
        logger.debug("Command line to be used when starting the JVM: %s" %
                     ' '.join(cmd))

        # start java subprocess (second fork)
        logger.trace("[%s] Starting the JVM..." % os.getpid())
        try:
            proc = subprocess.Popen(
Example #34
        try:
            fd = open(jsonfile)
        except Exception, e:
            logger.debug("Error reading configuration file %s: %s; "
                         "ignoring..." % (jsonfile, e))
            return {}

        config = None
        try:
            config = json.load(fd)
        except Exception, e:
            logger.error("Error parsing configuration file %s: %s" %
                         (jsonfile, e))
            return {}

        logger.trace("contents read from %s: %s" % (jsonfile, config))
        return config

    def mtime_changed(self):
        for yamlfile, mtime in self._mtimes.iteritems():
            if os.stat(yamlfile)[8] != mtime:
                return True
        return False

    def dump(self):
        print(yaml.dump(self._conf))

    def _check_appcontainer_config(self):
        # did we load any configuration at all?
        if not self._conf:
            logger.critical("No configuration present. Please put a m2ee.yaml "
Example #35
    def _merge_microflow_constants(self):
        """
        3.0: config.json "contains the configuration settings of the active
        configuration (in the Modeler) at the time of deployment." It also
        contains default values for microflow constants. D/T configuration is
        not stored in the mdp anymore, so for D/T we need to insert it into
        the configuration we read from yaml (yay!)
        { "Configuration": { "key": "value", ... }, "Constants": {
        "Module.Constant": "value", ... } } also... move the custom section
        into the MicroflowConstants runtime config option where 3.0 now
        expects them to be! yay... (when running 2.5, the MicroflowConstants
        part of runtime config will be sent using the old
        update_custom_configuration m2ee api call. Fun!
        """

        logger.debug("Merging microflow constants configuration...")

        config_json = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_file = os.path.join(self._conf['m2ee']['app_base'],
                                            'model',
                                            'config.json'
                                            )
            logger.trace("In DTAPMode %s, so loading configuration from %s" %
                         (self.get_dtap_mode(), config_json_file)
                         )
            config_json = self._try_load_json(config_json_file)

        # figure out which constants to use
        merge_constants = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_constants = config_json.get('Constants', {})
            logger.trace("In DTAPMode %s, so using Constants from "
                         "config.json: %s" %
                         (self.get_dtap_mode(), config_json_constants))
            merge_constants.update(config_json_constants)
        # custom yaml section can override defaults
        yaml_custom = self._conf.get('custom', {})
        if yaml_custom:
            logger.trace("Using constants from custom config section: %s" %
                         yaml_custom)
            merge_constants.update(yaml_custom)
        # 'MicroflowConstants' from runtime yaml section can override
        # default/custom
        yaml_mxruntime_mfconstants = (
            self._conf['mxruntime'].get('MicroflowConstants', {}))
        if yaml_mxruntime_mfconstants:
            logger.trace("Using constants from mxruntime/MicroflowConstants: "
                         "%s" % yaml_mxruntime_mfconstants)
            merge_constants.update(yaml_mxruntime_mfconstants)
        # merge all yaml runtime settings into config
        merge_config = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_configuration = config_json.get('Configuration', {})
            logger.trace("In DTAPMode %s, so seeding runtime configuration "
                         "with Configuration from config.json: %s" %
                         (self.get_dtap_mode(), config_json_configuration))
            merge_config.update(config_json_configuration)
        merge_config.update(self._conf['mxruntime'])
        logger.trace("Merging current mxruntime config into it... %s" %
                     self._conf['mxruntime'])
        # replace 'MicroflowConstants' with mfconstants we just figured out
        # before to prevent dict-deepmerge-problems
        merge_config['MicroflowConstants'] = merge_constants
        logger.trace("Replacing 'MicroflowConstants' with constants we just "
                     "figured out: %s" % merge_constants)
        # the merged result will be put back into self._conf['mxruntime']
        logger.debug("Merged runtime configuration: %s" % merge_config)
        return merge_config
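The constants precedence that _merge_microflow_constants implements can be restated compactly: Constants from config.json (D/T modes only) are overridden by the custom yaml section, which in turn is overridden by mxruntime/MicroflowConstants. As plain dict updates (values illustrative):

merge_constants = {}
merge_constants.update({'Module.Constant': 'from config.json'})
merge_constants.update({'Module.Constant': 'from custom section'})
merge_constants.update({'Module.Constant': 'from MicroflowConstants'})
print(merge_constants['Module.Constant'])  # 'from MicroflowConstants'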
Example #36
        try:
            fd = open(jsonfile)
        except Exception, e:
            logger.debug("Error reading configuration file %s: %s; "
                         "ignoring..." % (jsonfile, e))
            return {}

        config = None
        try:
            config = json.load(fd)
        except Exception, e:
            logger.error("Error parsing configuration file %s: %s" %
                         (jsonfile, e))
            return {}

        logger.trace("contents read from %s: %s" % (jsonfile, config))
        return config

    def mtime_changed(self):
        for yamlfile, mtime in self._mtimes.iteritems():
            if os.stat(yamlfile)[8] != mtime:
                return True
        return False

    def dump(self):
        print(yaml.dump(self._conf))

    def _check_appcontainer_config(self):
        # did we load any configuration at all?
        if not self._conf:
            logger.critical("No configuration present. Please put a m2ee.yaml "
Example #37
    def _merge_microflow_constants(self):
        """
        3.0: config.json "contains the configuration settings of the active
        configuration (in the Modeler) at the time of deployment." It also
        contains default values for microflow constants. D/T configuration is
        not stored in the mdp anymore, so for D/T we need to insert it into
        the configuration we read from yaml (yay!)
        { "Configuration": { "key": "value", ... }, "Constants": {
        "Module.Constant": "value", ... } } also... move the custom section
        into the MicroflowConstants runtime config option where 3.0 now
        expects them to be! yay... (when running 2.5, the MicroflowConstants
        part of runtime config will be sent using the old
        update_custom_configuration m2ee api call. Fun!
        """

        logger.debug("Merging microflow constants configuration...")

        config_json = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_file = os.path.join(self._conf['m2ee']['app_base'],
                                            'model', 'config.json')
            logger.trace("In DTAPMode %s, so loading configuration from %s" %
                         (self.get_dtap_mode(), config_json_file))
            config_json = self._try_load_json(config_json_file)

        # figure out which constants to use
        merge_constants = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_constants = config_json.get('Constants', {})
            logger.trace("In DTAPMode %s, so using Constants from "
                         "config.json: %s" %
                         (self.get_dtap_mode(), config_json_constants))
            merge_constants.update(config_json_constants)
        # custom yaml section can override defaults
        yaml_custom = self._conf.get('custom', {})
        if yaml_custom:
            logger.trace("Using constants from custom config section: %s" %
                         yaml_custom)
            merge_constants.update(yaml_custom)
        # 'MicroflowConstants' from runtime yaml section can override
        # default/custom
        yaml_mxruntime_mfconstants = (self._conf['mxruntime'].get(
            'MicroflowConstants', {}))
        if yaml_mxruntime_mfconstants:
            logger.trace("Using constants from mxruntime/MicroflowConstants: "
                         "%s" % yaml_mxruntime_mfconstants)
            merge_constants.update(yaml_mxruntime_mfconstants)
        # merge all yaml runtime settings into config
        merge_config = {}
        if not self.get_dtap_mode()[0] in ('A', 'P'):
            config_json_configuration = config_json.get('Configuration', {})
            logger.trace("In DTAPMode %s, so seeding runtime configuration "
                         "with Configuration from config.json: %s" %
                         (self.get_dtap_mode(), config_json_configuration))
            merge_config.update(config_json_configuration)
        merge_config.update(self._conf['mxruntime'])
        logger.trace("Merging current mxruntime config into it... %s" %
                     self._conf['mxruntime'])
        # replace 'MicroflowConstants' with mfconstants we just figured out
        # before to prevent dict-deepmerge-problems
        merge_config['MicroflowConstants'] = merge_constants
        logger.trace("Replacing 'MicroflowConstants' with constants we just "
                     "figured out: %s" % merge_constants)
        # the merged result will be put back into self._conf['mxruntime']
        logger.debug("Merged runtime configuration: %s" % merge_config)
        return merge_config
Example #38
def emptydb(config):

    if not config.allow_destroy_db():
        logger.error("Refusing to do a destructive database operation "
                     "because the allow_destroy_db configuration option "
                     "is set to false.")
        return False

    env = os.environ.copy()
    env.update(config.get_pg_environment())

    logger.info("Removing all tables...")
    # get list of drop table commands
    cmd = (
        config.get_psql_binary(), "-t", "-c",
        "SELECT 'DROP TABLE ' || n.nspname || '.\"' || c.relname || '\" CASCADE;' "
        "FROM pg_catalog.pg_class AS c LEFT JOIN pg_catalog.pg_namespace AS n "
        "ON n.oid = c.relnamespace WHERE relkind = 'r' AND n.nspname NOT IN "
        "('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid)")
    logger.trace("Executing %s, creating pipe for stdout,stderr" % str(cmd))
    proc1 = subprocess.Popen(cmd,
                             env=env,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    (stdout, stderr) = proc1.communicate()

    if stderr != '':
        logger.error("An error occured while calling psql: %s" % stderr)
        return False

    stdin = stdout
    cmd = (config.get_psql_binary(), )
    logger.trace("Piping stdout,stderr to %s" % str(cmd))
    proc2 = subprocess.Popen(cmd,
                             env=env,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    (stdout, stderr) = proc2.communicate(stdin)

    if stderr != '':
        logger.error("An error occured while calling psql: %s" % stderr)
        return False

    logger.info("Removing all sequences...")
    # get list of drop sequence commands
    cmd = (config.get_psql_binary(), "-t", "-c",
           "SELECT 'DROP SEQUENCE ' || n.nspname || '.\"' || c.relname || '\" "
           "CASCADE;' FROM pg_catalog.pg_class AS c LEFT JOIN "
           "pg_catalog.pg_namespace AS n ON n.oid = c.relnamespace WHERE "
           "relkind = 'S' AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND "
           "pg_catalog.pg_table_is_visible(c.oid)")
    logger.trace("Executing %s, creating pipe for stdout,stderr" % str(cmd))
    proc1 = subprocess.Popen(cmd,
                             env=env,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    (stdout, stderr) = proc1.communicate()

    if stderr != '':
        logger.error("An error occured while calling psql: %s" % stderr)
        return False

    stdin = stdout
    cmd = (config.get_psql_binary(), )
    logger.trace("Piping stdout,stderr to %s" % str(cmd))
    proc2 = subprocess.Popen(cmd,
                             env=env,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    (stdout, stderr) = proc2.communicate(stdin)

    if stderr != '':
        logger.error("An error occured while calling psql: %s" % stderr)
        return False

    return True
Example #39
def psql(pg_env, psql_binary):
    env = os.environ.copy()
    env.update(pg_env)
    cmd = (psql_binary,)
    logger.trace("Executing %s" % str(cmd))
    subprocess.call(cmd, env=env)
Example #40
def psql(config):
    env = os.environ.copy()
    env.update(config.get_pg_environment())
    cmd = (config.get_psql_binary(), )
    logger.trace("Executing %s" % str(cmd))
    subprocess.call(cmd, env=env)
Example #41
def emptydb(config):

    if not config.allow_destroy_db():
        logger.error(
            "Refusing to do a destructive database operation "
            "because the allow_destroy_db configuration option "
            "is set to false."
        )
        return False

    env = os.environ.copy()
    env.update(config.get_pg_environment())

    logger.info("Removing all tables...")
    # get list of drop table commands
    cmd = (
        config.get_psql_binary(),
        "-t",
        "-c",
        "SELECT 'DROP TABLE ' || n.nspname || '.\"' || c.relname || '\" CASCADE;' "
        "FROM pg_catalog.pg_class AS c LEFT JOIN pg_catalog.pg_namespace AS n "
        "ON n.oid = c.relnamespace WHERE relkind = 'r' AND n.nspname NOT IN "
        "('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid)",
    )
    logger.trace("Executing %s, creating pipe for stdout,stderr" % str(cmd))
    proc1 = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc1.communicate()

    if stderr != "":
        logger.error("An error occured while calling psql: %s" % stderr)
        return False

    stdin = stdout
    cmd = (config.get_psql_binary(),)
    logger.trace("Piping stdout,stderr to %s" % str(cmd))
    proc2 = subprocess.Popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc2.communicate(stdin)

    if stderr != "":
        logger.error("An error occured while calling psql: %s" % stderr)
        return False

    logger.info("Removing all sequences...")
    # get list of drop sequence commands
    cmd = (
        config.get_psql_binary(),
        "-t",
        "-c",
        "SELECT 'DROP SEQUENCE ' || n.nspname || '.\"' || c.relname || '\" "
        "CASCADE;' FROM pg_catalog.pg_class AS c LEFT JOIN "
        "pg_catalog.pg_namespace AS n ON n.oid = c.relnamespace WHERE "
        "relkind = 'S' AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND "
        "pg_catalog.pg_table_is_visible(c.oid)",
    )
    logger.trace("Executing %s, creating pipe for stdout,stderr" % str(cmd))
    proc1 = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc1.communicate()

    if stderr != "":
        logger.error("An error occured while calling psql: %s" % stderr)
        return False

    stdin = stdout
    cmd = (config.get_psql_binary(),)
    logger.trace("Piping stdout,stderr to %s" % str(cmd))
    proc2 = subprocess.Popen(cmd, env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc2.communicate(stdin)

    if stderr != "":
        logger.error("An error occured while calling psql: %s" % stderr)
        return False

    return True
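Each removal step above builds a pipe by hand: the first psql call prints the generated DROP statements, which the second call then executes from stdin. The shell equivalent (illustrative, table step only):

# psql -t -c "SELECT 'DROP TABLE ' || ... || ' CASCADE;' FROM pg_catalog.pg_class ..." | psql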
Example #42
def psql(config):
    env = os.environ.copy()
    env.update(config.get_pg_environment())
    cmd = (config.get_psql_binary(),)
    logger.trace("Executing %s" % str(cmd))
    subprocess.call(cmd, env=env)
Example #43
def emptydb(pg_env, psql_binary):

    env = os.environ.copy()
    env.update(pg_env)

    answer = raw_input("This command will drop all tables and sequences in "
                       "database %s. Continue? (y)es, (N)o? " %
                       env['PGDATABASE'])
    if answer != 'y':
        print("Aborting!")
        return

    logger.info("Removing all tables...")
    # get list of drop table commands
    cmd = (
        psql_binary, "-t", "-c",
        "SELECT 'DROP TABLE ' || n.nspname || '.' || c.relname || ' CASCADE;' "
        "FROM pg_catalog.pg_class AS c LEFT JOIN pg_catalog.pg_namespace AS n "
        "ON n.oid = c.relnamespace WHERE relkind = 'r' AND n.nspname NOT IN "
        "('pg_catalog', 'pg_toast') AND pg_catalog.pg_table_is_visible(c.oid)"
    )
    logger.trace("Executing %s, creating pipe for stdout,stderr" % str(cmd))
    proc1 = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    (stdout, stderr) = proc1.communicate()

    if stderr != '':
        logger.error("An error occured while calling psql: %s" % stderr)
        return

    stdin = stdout
    cmd = (psql_binary,)
    logger.trace("Piping stdout,stderr to %s" % str(cmd))
    proc2 = subprocess.Popen(cmd, env=env, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc2.communicate(stdin)

    if stderr != '':
        logger.error("An error occured while calling psql: %s" % stderr)
        return

    logger.info("Removing all sequences...")
    # get list of drop sequence commands
    cmd = (
        psql_binary, "-t", "-c",
        "SELECT 'DROP SEQUENCE ' || n.nspname || '.' || c.relname || ' "
        "CASCADE;' FROM pg_catalog.pg_class AS c LEFT JOIN "
        "pg_catalog.pg_namespace AS n ON n.oid = c.relnamespace WHERE "
        "relkind = 'S' AND n.nspname NOT IN ('pg_catalog', 'pg_toast') AND "
        "pg_catalog.pg_table_is_visible(c.oid)"
    )
    logger.trace("Executing %s, creating pipe for stdout,stderr" % str(cmd))
    proc1 = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    (stdout, stderr) = proc1.communicate()

    if stderr != '':
        logger.error("An error occured while calling psql: %s" % stderr)
        return

    stdin = stdout
    cmd = (psql_binary,)
    logger.trace("Piping stdout,stderr to %s" % str(cmd))
    proc2 = subprocess.Popen(cmd, env=env, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc2.communicate(stdin)

    if stderr != '':
        logger.error("An error occured while calling psql: %s" % stderr)
        return
Example #44
            logger.error("The unzip program could not be found: %s" %
                         ose.strerror)
        else:
            logger.error("An error occured while executing unzip: %s" % ose)
        return False

    logger.debug("Removing everything in model/ and web/ locations...")
    # TODO: error handling. removing model/ and web/ itself should not be
    # possible (parent dir is root owned), all errors ignored for now
    app_base = config.get_app_base()
    shutil.rmtree(os.path.join(app_base, 'model'), ignore_errors=True)
    shutil.rmtree(os.path.join(app_base, 'web'), ignore_errors=True)

    logger.debug("Extracting archive...")
    cmd = ("unzip", "-oq", mda_file_name, "web/*", "model/*", "-d", app_base)
    logger.trace("Executing %s" % str(cmd))
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()

    if proc.returncode != 0:
        logger.error("An error occured while extracting archive:")
        logger.error("stdout: %s" % stdout)
        logger.error("stderr: %s" % stderr)
        return False
    else:
        logger.trace("stdout: %s" % stdout)
        logger.trace("stderr: %s" % stderr)

    # XXX: reset permissions on web/ model/ to be sure after executing this