Example No. 1
 def umount(self):
     self.mountprocess_lock_acquire()
     try:
         if not os.path.isdir(self.hash_id_path):
             logger.info('Mountpoint %s does not exist.' % self.hash_id_path, self)
         else:
             if not self.is_mounted():
                 logger.info('Mountpoint %s is not mounted' % self.hash_id_path, self)
             else:
                 if self.check_mount_lock():
                     logger.info('Mountpoint %s still in use. Keep mounted' % self.mountpoint, self)
                 else:
                     self.pre_umount_check()
                     self._umount()
                     self.post_umount_check()
                     if os.listdir(self.mountpoint):
                         logger.warning('Mountpoint %s not empty after unmount' % self.mountpoint, self)
                     else:
                         logger.info('unmount %s from %s'
                                     %(self.log_command, self.mountpoint),
                                     self)
     except Exception:
         # re-raised unchanged; this handler exists only so the 'else'
         # clause below runs exclusively on success
         raise
     else:
         self.del_mount_lock()
         self.remove_symlink()
     finally:
         self.mountprocess_lock_release()
Example No. 2
    def enhance_commands(self, commands_list, templating_values):
        """Checks and enchances with known listed commands"""

        tvalues = helpers.merge_templates(templating_values)

        # preserve order while getting rid of dup entries
        unique_list = []
        [
            unique_list.append(single_command)
            for single_command in commands_list
            if single_command not in unique_list
        ]  # pylint: disable=expression-not-assigned
        invalid_commands = [
            command for command in unique_list
            if command not in self._commands
        ]

        if invalid_commands:
            logger.warning("Invalid command(s) found",
                           commands=invalid_commands,
                           module=COMMAND_MODULE_INIT)
            return []

        enhance_list = [self._commands[command] for command in unique_list]

        filtered_commands = [{
            "cmd": helpers.render_template(command.cmd, tvalues),
            "command": command,
        } for command in enhance_list]

        return filtered_commands
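A hedged usage sketch for the method above (the instance name, command names, and templating values are hypothetical, not taken from the example):

    # 'runner' is a hypothetical instance of the class defining enhance_commands;
    # duplicates in the input list are dropped while order is preserved.
    rendered = runner.enhance_commands(
        ["backup", "backup", "prune"],
        [{"host": "db01"}],
    )
    for item in rendered:
        print(item["cmd"])  # the templated command string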
Example No. 3
def updateStatus(devId, status):
    db = init()
    cursor = db.cursor()
    cursor.execute("update devices set status=? where devId=?",
                   (devId, status))
    db.commit()
    l.warning('Updated IoT Device status')
Example No. 4
    def delete_pool(self, batch_service_client: batch.BatchExtensionsClient):
        """
        Deletes the pool the if the pool, if the pool has already been deleted or marked for deletion it
        should ignore the batch exception that is thrown. These errors come up due to multiple jobs using the same pool
        and when a the job cleans up after it's self it will call delete on the same pool since they are a shared resource.

        :param batch_service_client: A Batch service client.
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        """
        logger.info("Deleting pool: {}.".format(self.pool_id))
        try:
            batch_service_client.pool.delete(self.pool_id)
        except batchmodels.batch_error.BatchErrorException as batch_exception:
            if utils.expected_exception(
                    batch_exception, "The specified pool has been marked for deletion"):
                logger.warning(
                    "The specified pool [{}] has been marked for deletion.".format(
                        self.pool_id))
            elif utils.expected_exception(batch_exception, "The specified pool does not exist"):
                logger.warning(
                    "The specified pool [{}] has been deleted.".format(
                        self.pool_id))
            else:
                traceback.print_exc()
                utils.print_batch_exception(batch_exception)
Example No. 5
    def submit_pool(self, batch_service_client: batch.BatchExtensionsClient, template: str):
        """
        Submits a batch pool based on the template 

        :param batch_service_client: The batch client used for making batch operations
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param template: The in memory version of the template used to create a the job.
        :type template: str
        """
        parameters = ctm.load_file(self.parameters_file)
        pool_json = batch_service_client.pool.expand_template(template, parameters)
        ctm.set_template_pool_id(template, self.pool_id)
        pool = batch_service_client.pool.poolparameter_from_json(pool_json)
        logger.info('Creating pool [{}]...'.format(self.pool_id))
        try:
            batch_service_client.pool.add(pool)
        except batchmodels.batch_error.BatchErrorException as err:
            if utils.expected_exception(
                    err, "The specified pool already exists"):
                logger.warning(
                    "Pool [{}] is already being created.".format(
                        self.pool_id))
            else:
                logger.info("Create pool error: {}".format(err))
                traceback.print_exc()
                utils.print_batch_exception(err)
Example No. 6
    def search(self,
               query,
               max_tweets,
               count=100,
               lang='en',
               show_user=False,
               **kwargs):
        if 'retweet' not in kwargs or not kwargs['retweet']:
            query += " -filter:retweets"
        result_type = kwargs.get("result_type", self.RECENT)
        tweet_count = 0
        max_id = -1
        since_id = None
        while tweet_count < max_tweets:
            try:
                # build the search arguments once instead of four near-identical
                # branches; note the original only passed lang before paging
                # started (max_id <= 0), which is preserved here
                search_kwargs = dict(q=query,
                                     count=count,
                                     show_user=show_user,
                                     result_type=result_type)
                if max_id > 0:
                    search_kwargs['max_id'] = str(max_id - 1)
                else:
                    search_kwargs['lang'] = lang
                if since_id:
                    search_kwargs['since_id'] = since_id
                new_tweets = self.api.search(**search_kwargs)
                if not new_tweets:
                    logger.info(__name__, "No new tweets found")
                    break
                # since_id = new_tweets[-1].id
                max_id = new_tweets[-1].id
                tweet_count += len(new_tweets)
                if max_tweets - count <= tweet_count < max_tweets:
                    result_type = self.POPULAR
                # returns after the first successful page; the surrounding
                # loop only retries when the request itself fails
                return new_tweets
            except tweepy.RateLimitError:
                logger.warning(__name__, "Rate limit error")
                time.sleep(15 * 60)
            except tweepy.TweepError as e:
                logger.error(__name__, "TweepError: {}".format(e))
                break
Example No. 7
def get_upper_bound(K_guess, d1, d2, d3, V, beta, alphas):
    func = lambda K: get_sigbar1(K, d1, d2, d3, V, beta, alphas)
    try:
        # note: fsolve returns an ndarray, while the fallback returns an int
        return scipy.optimize.fsolve(func, K_guess)
    except Exception:
        logger.warning("Unable to solve for K. Setting upper bound to 1000.")
        return 1000
Example No. 8
def copy_tags(src, dst):
    for tag in sorted(src):
        try:
            dst[tag] = src[tag]
        except ValueError:
            logger.warning("Error settings tag " + tag)
            pass
Example No. 9
    def readPidFile(self):
        """
        Read the pid and procname from the file

        Returns:
            tuple:  tuple of (pid(int), procname(str))
        """
        pid = 0
        procname = ''
        try:
            with open(self.pidFile, 'rt') as f:
                data = f.read()
            data = data.split('\n', 1)
            if data[0].isdigit():
                pid = int(data[0])
            if len(data) > 1:
                procname = data[1].strip('\n')
        except OSError as e:
            logger.warning(
                'Failed to read PID and process name from %s: [%s] %s' %
                (e.filename, e.errno, e.strerror))
        except ValueError as e:
            logger.warning(
                'Failed to extract PID and process name from %s: %s' %
                (self.pidFile, str(e)))
        return (pid, procname)
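The pid-file format implied above is the PID on the first line and the process name on the second. A minimal standalone sketch of that parse (the file path and contents are hypothetical):

    from pathlib import Path

    pid_file = Path('/tmp/example.pid')    # hypothetical path
    pid_file.write_text('4242\nrsync\n')   # PID, newline, process name

    data = pid_file.read_text().split('\n', 1)
    pid = int(data[0]) if data[0].isdigit() else 0
    procname = data[1].strip('\n') if len(data) > 1 else ''
    print(pid, procname)                   # -> 4242 rsync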
Example No. 10
    def evaluate(self, meta, params, warning_only=False, default=None, line=None):
        props = []
        for v in self.values:
            prop = False
            if isinstance(v, BoolOperator) and not v.evaluate(meta, params, default, line):
                return False
            elif isinstance(v, MetaObject):
                prop = v.evaluate(meta, params, default, line)
            elif v == meta:
                prop = Atomic(v)
                prop.value = v
            if prop == Commands.Remove:
                return prop
            if prop:
                if isinstance(prop, list):
                    props += prop
                else:
                    props.append(prop)

        if not props and warning_only:
            logger.warning(line, 'Value is not allowed', meta)
            if default:
                props = [default]
            else:
                props = [Commands.Remove]

        # two types of or: on a list or a value
        if not isinstance(meta, list) and len(props) == 1:
            return props[0]
        return props
Example No. 11
    def backupConfig(self):
        """
        create a backup of encfs config file into local config folder
        so in cases of the config file get deleted or corrupt user can restore
        it from there
        """
        cfg = self.configFile()
        if not os.path.isfile(cfg):
            logger.warning(
                'No encfs config in %s. Skip backup of config file.' % cfg,
                self)
            return
        backup_folder = self.config.encfsconfigBackupFolder(self.profile_id)
        tools.makeDirs(backup_folder)
        old_backups = os.listdir(backup_folder)
        old_backups.sort(reverse=True)
        if len(old_backups):
            last_backup = os.path.join(backup_folder, old_backups[0])

            #don't create a new backup if config hasn't changed
            if tools.md5sum(cfg) == \
               tools.md5sum(last_backup):
                logger.debug('Encfs config did not change. Skip backup', self)
                return

        new_backup_file = '.'.join(
            (os.path.basename(cfg), datetime.now().strftime('%Y%m%d%H%M')))
        new_backup = os.path.join(backup_folder, new_backup_file)
        logger.debug(
            'Create backup of encfs config %s to %s' % (cfg, new_backup), self)
        shutil.copy2(cfg, new_backup)
Example No. 12
def analyse_parsed_msas(msas, op):
    """ Analyse results from run_parsing_step and store them into msas """
    output_dir = op.output_dir
    core_assignment = op.core_assignment
    parse_run_output_dir = os.path.join(output_dir, "parse_run")
    parse_run_results = os.path.join(parse_run_output_dir, "results")
    invalid_msas = []
    count = 0
    for name, msa in msas.items():
        count += 1
        parse_fasta_output_dir = os.path.join(parse_run_results, name)
        parse_run_log = os.path.join(parse_fasta_output_dir,
                                     name + ".raxml.log")
        msa.binary_path = os.path.join(parse_fasta_output_dir,
                                       name + ".raxml.rba")
        parse_result = parse_msa_info(parse_run_log, msa, core_assignment)
        if not msa.valid:
            invalid_msas.append(msa)
    improve_cores_assignment(msas, op)
    predict_number_cores(msas, op)
    if invalid_msas:
        invalid_msas_file = os.path.join(output_dir, "invalid_msas.txt")
        logger.warning("Found " + str(len(invalid_msas)) +
                       " invalid MSAs (see " + invalid_msas_file + ")")
        with open(invalid_msas_file, "w") as f:
            for msa in invalid_msas:
                f.write(msa.name + "\n")
    save_msas(msas, op)
Example No. 13
def unload(msg=None, module=None):
    """ unload a module from labere """
    
    if msg is not None and str(msg.origin) not in var.database.__refero__()['misc']['labop_extended']:
        msg.origin.privmsg(msg.params, 'You do not have the misc:labop_extended privilege')
        return False
    if module is None:
        msg.origin.privmsg(msg.params, "Insufficient parameters for \x02UNLOAD\x02")
        msg.origin.privmsg(msg.params, 'Syntax: /msg %s UNLOAD <module>' % (var.bots[msg.params].data['nick']))
        return False
    uplink, events = var.core
    try:
        if module not in var.modules:
            logger.warning('unload(): %s is not in the loaded modules list' % (module))
            return False
    except Exception:
        logger.error('unload(): unknown error occurred')
        return False
    mod = var.modules[module]
    try:
        mod.close(msg)
    except Exception:
        logger.info('unload(): error uninitializing %s' % (mod.__name__))
        logger.info('%s' % (traceback.format_exc(4)))
        return False
    var.modules.pop(module)
    if msg: msg.origin.privmsg(msg.params, '%s has been unloaded.' % (mod.__name__))
    events.onmodunload(module)
Example No. 14
def parse_hardcoded_parameters(hardcoded_parameters_file):
    parameter_hardcoder = ParameterHardcoder()
    if hardcoded_parameters_file is not None:
        line_number = 0
        with open(hardcoded_parameters_file) as f:
            for line in f:
                line_number += 1
                if line is None or not line.strip() or line.strip().startswith("#"):
                    pass
                else:
                    # the third column must be obtained as a whole, not split further
                    parsed_hardcoded_parameter = [ _ for _ in line.strip().split("\t") if _ != ""]
                    # valid lines contain two or three columns
                    if not (2 <= len(parsed_hardcoded_parameter) <= 3):
                        warning("Invalid line at line number %d of the given hardcoded parameters file. Line will be"
                                "ignored:\n%s" % (line_number, line), 0)
                        continue

                    parameter_name = parsed_hardcoded_parameter[0]
                    hardcoded_value = parsed_hardcoded_parameter[1]
                    tool_names = None
                    if len(parsed_hardcoded_parameter) == 3:
                        tool_names = parsed_hardcoded_parameter[2].split(',')
                    if tool_names:
                        for tool_name in tool_names:
                            parameter_hardcoder.register_parameter(parameter_name, hardcoded_value, tool_name.strip())
                    else:
                        parameter_hardcoder.register_parameter(parameter_name, hardcoded_value)

    return parameter_hardcoder
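For context, a hedged sketch of an input file this parser would accept (tab-separated; two columns hardcode a parameter globally, three columns restrict it to the listed tools; the file contents and path are hypothetical):

    import tempfile

    content = "# a comment line\nthreads\t4\nquiet\ttrue\ttoolA,toolB\n"
    with tempfile.NamedTemporaryFile('w', suffix='.tsv', delete=False) as f:
        f.write(content)
        path = f.name

    hardcoder = parse_hardcoded_parameters(path)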
Example No. 15
    def heartbeat(self) -> bool:
        if self.socket is None:
            self.is_alive = False
            return False
        sock_f = self.socket.makefile(mode='r', encoding='utf-8')
        msg = MsgField.HEARTBEAT
        msg['sn'] = self.sn
        msg = bytes(json.dumps(msg) + '\n', encoding='utf-8')
        self.socket.sendall(msg)

        for poll in range(3):
            gevent.sleep(1)
            try:
                data = sock_f.readline()
                if data:
                    logger.info(
                        '{}: device({}) heartbeat succeeded, {}'.format(
                            __file__, self.sn, data))
                    self.is_alive = True
                    return True
            except Exception:
                err_msg = traceback.format_exc()
                logger.info('{}: device({}) heartbeat failed, {}'.format(
                    __file__, self.sn, err_msg))
                continue

        logger.warning('{}: device({}) heartbeat failed'.format(
            __file__, self.sn))
        self.is_alive = False
        return False
Example No. 16
def crawing():

    logger.info("crawing start..")
    while True:
        for op in config.spiderConfig:

            urls = op["urls"]
            if isinstance(urls, str):
                urls = [urls]

            for url in urls:
                parser = Parser(op["proxyre"], op["ipre"], op["portre"],
                                op["proxyre"])
                spider = Spider(url, parser)

                proxys = spider.getProxys()
                logger.info("crawl proxys %s" % proxys)

                if not proxys:
                    logger.warning("crawl error for urls: %s" % op["urls"])

                for proxy in proxys:
                    proxyapi.add(proxy)

        logger.info("next crawing.")
        time.sleep(60 * 10)
Example No. 17
 def callback(self, *args, profileID=None):
     if profileID is None:
         profileID = self.config.currentProfile()
     profileName = self.config.profileName(profileID)
     cmd = [self.script, profileID, profileName]
     cmd.extend([str(x) for x in args])
     logger.debug('Call user-callback: %s' % ' '.join(cmd), self)
     if self.config.userCallbackNoLogging():
         stdout, stderr = None, None
     else:
         stdout, stderr = PIPE, PIPE
     try:
         callback = Popen(cmd,
                          stdout=stdout,
                          stderr=stderr,
                          universal_newlines=True)
         output = callback.communicate()
         if output[0]:
             logger.info(
                 'user-callback returned \'%s\'' % output[0].strip('\n'),
                 self)
         if output[1]:
             logger.error(
                 'user-callback returned \'%s\'' % output[1].strip('\n'),
                 self)
         if callback.returncode != 0:
             logger.warning(
                 'user-callback returncode: %s' % callback.returncode, self)
             raise StopException()
     except OSError as e:
         logger.error(
             "Exception when trying to run user callback: %s" % e.strerror,
             self)
Example No. 18
def print_stats(op):
    failed_commands = os.path.join(op.output_dir, "failed_commands.txt")
    if os.path.isfile(failed_commands):
        with open(failed_commands) as f:
            failed_number = len(f.readlines())
        logger.warning("Total number of jobs that failed: " +
                       str(failed_number))
        logger.warning("For a detailed list, see " + failed_commands)
Example No. 19
def on_message(data):
    keys = db.getKeys()
    data['status'] = func.decrypt(keys['privKey'], unhexlify(data['status']))
    data['time'] = func.decrypt(keys['privKey'], unhexlify(data['time']))
    #sio.emit('my response', {'response': 'my response'})
    db.addAuth(data['status'], float(time.time() - float(data['time'])))
    l.warning('message received with {0}'.format(data))
Example No. 20
    def has_node(self, node_name, warn=True):
        """
        Tests if a ROS node actually exists.

        This method checks whether a ROS node named $node_name exists in the current ROS package.

        :param node_name: name of ROS node to test
        :param warn: True if a warning about the missing node should be emitted
        :return: True if node exists, False otherwise
        """
        pkg = os.path.join(self.path, '../..')
        # Just consider files that are executable:
        if [
                f for f in Package.get_paths_to_file(pkg, node_name)
                if os.access(f, os.X_OK)
        ]:
            # if len(res) > 1:
            #     log.warning("Found {} executable files named {}, assuming existence."
            #                 .format(len(res), node_name, res[0]))
            return True
        else:
            if warn:
                logger.warning("Node '{}' in package '{}' not found.".format(
                    node_name, self.name))
            return False
Example No. 21
def authentication(update, context):
    chatid = update.effective_message.chat_id
    with open(CHATID_PATH, "r") as file:
        if str(chatid) in file.read():
            context.bot.send_message(
                chat_id=update.effective_message.chat_id,
                text=i18n.t("addarr.Chatid already allowed"),
            )
        else:
            password = update.message.text
            if ("/auth" in password):
                password = password.replace("/auth ", "")
            if password == config["telegram"]["password"]:
                with open(CHATID_PATH, "a") as file:
                    file.write(str(chatid) + "\n")
                    context.bot.send_message(
                        chat_id=update.effective_message.chat_id,
                        text=i18n.t("addarr.Chatid added"),
                    )
                    return "added"
            else:
                logger.warning(
                    f"Failed authentication attempt by [{update.message.from_user.username}]. Password entered: [{password}]"
                )
                context.bot.send_message(
                    chat_id=update.effective_message.chat_id,
                    text=i18n.t("addarr.Wrong password"))
                return ConversationHandler.END  # This only stops the auth conv, so it goes back to choosing screen
Example No. 22
    def _i_partition_size(self):
        # TODO FIX: something weird here
        # available_local_memory
        warning('temporary fix to available local memory computation (-512)')
        available_local_memory = _max_local_memory - 512
        # 16bytes local mem used for global / local indices and sizes
        available_local_memory -= 16
        # (4/8)ptr size per dat passed as argument (dat)
        available_local_memory -= (_address_bits / 8) * (len(
            self._unique_dat_args) + len(self._all_global_non_reduction_args))
        # (4/8)ptr size per dat/map pair passed as argument (ind_map)
        available_local_memory -= (_address_bits / 8) * len(self._unique_indirect_dat_args)
        # (4/8)ptr size per global reduction temp array
        available_local_memory -= (_address_bits / 8) * len(self._all_global_reduction_args)
        # (4/8)ptr size per indirect arg (loc_map)
        available_local_memory -= (_address_bits / 8) * len(self._all_indirect_args)
        # (4/8)ptr size * 7: for plan objects
        available_local_memory -= (_address_bits / 8) * 7
        # 1 uint value for block offset
        available_local_memory -= 4
        # 7: 7 bytes potentially lost for aligning the shared memory buffer to 'long'
        available_local_memory -= 7
        # 12: shared_memory_offset, active_thread_count,
        #     active_thread_count_ceiling variables (could be 8 or 12 depending)
        #     and 3 for potential padding after shared mem buffer
        available_local_memory -= 12 + 3
        # 2 * (4/8)ptr size + 1uint32: DAT_via_MAP_indirection(./_size/_map) per
        # dat map pairs
        available_local_memory -= 4 + \
            (_address_bits / 8) * 2 * len(self._unique_indirect_dat_args)
        # inside shared memory padding
        available_local_memory -= 2 * (len(self._unique_indirect_dat_args) - 1)

        max_bytes = sum(map(lambda a: a.data._bytes_per_elem, self._all_indirect_args))
        return available_local_memory / (2 * _warpsize * max_bytes) * (2 * _warpsize)
Example No. 23
    def transcode(self, purename, bakfile, vidfile, mobfile, imgfile, logfile):
        flvcmd=self.makeVidCmd_BEST(bakfile,vidfile,logfile, 44100, '96k', 'flv', 640, 480, "renren-inc", "2005-2011", "Generated by renrenVideo")
        logger.info( '['+self.name+'] ['+purename+'] vfile_ffmpeging ...')
        os.system('echo = = = = = = = = '+flvcmd)
        os.system(flvcmd)
 
        status=0
        if not os.path.exists(vidfile):  # no video file was generated
            logger.warning( '['+self.name+'] ['+purename+'] vfile_ffmpeg_fail' )
 
        else:
            status=1
            logger.info( '['+self.name+'] ['+purename+'] vfile_ffmpeg_succ: '+ vidfile )
 
            mobcmd=self.makeVidCmd_MOB(bakfile,mobfile,logfile, 44100, '64k', 'mp4', 320, 240, "renren-inc", "2005-2011", "Generated by renrenVideo")
            logger.info( '['+self.name+'] ['+purename+'] mfile_ffmpeging ...')
            os.system('echo = = = = = = = = '+mobcmd)
            os.system(mobcmd)
 
            jpgcmd=self.makeJpgCmd(vidfile,imgfile,logfile,'','',320,240)
            logger.info( '['+self.name+'] ['+purename+'] ifile_ffmpeging ...' )
            os.system('echo = = = = = = = = '+jpgcmd)
            os.system(jpgcmd)
 
            if not os.path.exists(imgfile):  # no thumbnail file was generated
                logger.warning( '['+self.name+'] ['+purename+'] ifile_ffmpeg_fail')
            else:
                logger.info( '['+self.name+'] ['+purename+'] ifile_ffmpeg_succ: '+ imgfile )
                status=2
 
        return status
Example No. 24
def filePush(fileList):
    """Pushes individual files to SFDC

    fileList - An array of file names to push
    """
    if len(fileList) == 0:
        logger.critical('No files listed to push')
        sys.exit(-1)

    file_list = ''

    for fname in fileList:
        file_path = os.path.join(os.path.expanduser(getRootDir()), fname)
        if os.path.exists(file_path):
            file_list = "%s%s%s" %(file_list, fname, os.pathsep,)
        else:
            logger.warning('Unable to find file "%s".  Skipping.' % (file_path,))

    if file_list != '':
        file_list = file_list[:-1]  # drop the trailing os.pathsep
        addFlag('%s="%s"' % ('sf.files2push', file_list,))
        print getFlags()
        runAnt('file-push')
    else:
        logger.critical('Unable to find any files to push.')
        sys.exit(-1)
Example No. 25
    def submit_pool(self, batch_service_client: batch.BatchExtensionsClient,
                    template: str):
        """
        Submits a batch pool based on the template 

        :param batch_service_client: The batch client used for making batch operations
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        :param template: The in memory version of the template used to create a the job.
        :type template: str
        """
        parameters = ctm.load_file(self.parameters_file)

        #updates any placeholder parameter values with the values from keyVault, if required
        utils.update_params_with_values_from_keyvault(
            parameters, self.keyvault_client_with_url)
        pool_json = batch_service_client.pool.expand_template(
            template, parameters)
        ctm.set_template_pool_id(template, self.pool_id)
        pool = batch_service_client.pool.poolparameter_from_json(pool_json)
        logger.info('Creating pool [{}]...'.format(self.pool_id))
        try:
            batch_service_client.pool.add(pool)
        except batchmodels.BatchErrorException as err:
            if utils.expected_exception(err,
                                        "The specified pool already exists"):
                logger.warning("Pool [{}] is already being created.".format(
                    self.pool_id))
            else:
                logger.info("Create pool error: {}".format(err))
                traceback.print_exc()
                utils.print_batch_exception(err)
Example No. 26
def sshKeyGen(keyfile):
    """
    Generate a new ssh-key pair (private and public key) in ``keyfile`` and
    ``keyfile``.pub

    Args:
        keyfile (str):  path for private key file

    Returns:
        bool:           True if successful; False if ``keyfile`` already exists
                        or if there was an error
    """
    if os.path.exists(keyfile):
        logger.warning('SSH keyfile "{}" already exists. Skip creating a new one'.format(keyfile))
        return False
    cmd = ['ssh-keygen', '-t', 'rsa', '-N', '', '-f', keyfile]
    proc = subprocess.Popen(cmd,
                            stdout = subprocess.DEVNULL,
                            stderr = subprocess.PIPE,
                            universal_newlines = True)
    out, err = proc.communicate()
    if proc.returncode:
        logger.error('Failed to create a new ssh-key: {}'.format(err))
    else:
        logger.info('Successfully created new ssh-key "{}"'.format(keyfile))
    return not proc.returncode
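A short usage sketch for the helper above (the key path is hypothetical; ssh-keygen also writes the public half next to the private key):

    keyfile = '/tmp/id_rsa_backup'          # hypothetical path
    if sshKeyGen(keyfile):
        with open(keyfile + '.pub') as pub:
            print(pub.read().strip())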
Example No. 27
    def get_artist_data(self, url):
        """ returns a dictionary of the artist's biography information """

        artist_bios_xpath = "//ul[@class='artist_block_bio']/li"
        artist_bio_dict = {}
        last_artist_bio_key = None

        try:
            artist_bios_elements = self.my_driver.find_elements_by_xpath(
                artist_bios_xpath)

            for artist_bio_e in artist_bios_elements:

                if ": " in artist_bio_e.text:
                    artist_bio = artist_bio_e.text.split(": ")
                    artist_bio_dict[artist_bio[0]] = artist_bio[1]
                    last_artist_bio_key = artist_bio[0]

                elif last_artist_bio_key:
                    artist_bio_dict[last_artist_bio_key] += artist_bio_e.text

            logger.log(f"parsed artist's biography:' {artist_bio_dict}")

            return artist_bio_dict

        except Exception as e:
            if not self.handle_crash(url, e):
                logger.warning(
                    f"Failed to get the artist's biography information, exception: {e}. Reloading"
                )
                self.my_driver.driver.get(url)
                return artist_bio_dict
Example No. 28
    def get_data_as_json_file_by_artist(self, curr_url, artist_name,
                                        artist_albums_cnt, artist_songs_cnt):
        """ returns a data dictionary of the artist's biography information and songs data """

        # get data
        data_by_artist_dict = {
            consts.ARTIST_DATA: {
                consts.ARTIST_NAME: artist_name,
                consts.ARTIST_BIO: self.get_artist_data(curr_url),
                consts.ALBUMS_CNT: artist_albums_cnt,
                consts.SONGS_CNT: artist_songs_cnt
            },
            consts.SONGS_DATA:
            self.navigate_pages(curr_url, [], artist_name,
                                self.navigate_songs_single_page)
        }

        # dump dictionary to json file by artist name
        try:
            file_name = f"json_files/{artist_name}.json"
            with open(file_name, 'w', encoding='utf-8') as f:
                json.dump(data_by_artist_dict, f, ensure_ascii=False, indent=4)

        except Exception as e:
            if not self.handle_crash(curr_url, e):
                logger.warning(
                    f"Failed to dump artist {artist_name} to json file, exception: {e}."
                )
Example No. 29
def parse_hardcoded_parameters(hardcoded_parameters_file):
    parameter_hardcoder = ParameterHardcoder()
    if hardcoded_parameters_file is not None:
        line_number = 0
        with open(hardcoded_parameters_file) as f:
            for line in f:
                line_number += 1
                if line is None or not line.strip() or line.strip().startswith("#"):
                    pass
                else:
                    # the third column must be obtained as a whole, not split further
                    parsed_hardcoded_parameter = line.strip().split(None, 2)
                    # valid lines contain two or three columns
                    if len(parsed_hardcoded_parameter) != 2 and len(parsed_hardcoded_parameter) != 3:
                        warning("Invalid line at line number %d of the given hardcoded parameters file. Line will be"
                                "ignored:\n%s" % (line_number, line), 0)
                        continue

                    parameter_name = parsed_hardcoded_parameter[0]
                    hardcoded_value = parsed_hardcoded_parameter[1]
                    tool_names = None
                    if len(parsed_hardcoded_parameter) == 3:
                        tool_names = parsed_hardcoded_parameter[2].split(',')
                    if tool_names:
                        for tool_name in tool_names:
                            parameter_hardcoder.register_parameter(parameter_name, hardcoded_value, tool_name.strip())
                    else:
                        parameter_hardcoder.register_parameter(parameter_name, hardcoded_value)

    return parameter_hardcoder
Example No. 30
    def do_connect(self, reconnect = False):
        # Create the socket and connect to the server
        if reconnect:
            logger.warning('Connection failed, retrying in ' + str(self._retrydelay) + ' seconds')
            yield gen.sleep(self._retrydelay)

        while self._connection is None:
            logger.debug('Connecting to {}:{}'.format(config.ENVISALINKHOST, config.ENVISALINKPORT))
            try:
                self._connection = yield self.tcpclient.connect(config.ENVISALINKHOST, config.ENVISALINKPORT)
                self._connection.set_close_callback(self.handle_close)
            except StreamClosedError:
                # failed to connect, but got no connection object, so we loop here
                logger.warning('Connection failed, retrying in ' + str(self._retrydelay) + ' seconds')
                yield gen.sleep(self._retrydelay)
                continue

            try:
                line = yield self._connection.read_until(self._terminator)
            except StreamClosedError:
                # in this state, since the connection object isn't None, the close
                # callback (handle_close) will fire, so we just bail out here and
                # let handle_close deal with it
                return

            logger.debug("Connected to %s:%i" % (config.ENVISALINKHOST, config.ENVISALINKPORT))
            self.handle_line(line)
Example No. 31
    def backupConfig(self):
        """
        create a backup of encfs config file into local config folder
        so in cases of the config file get deleted or corrupt user can restore
        it from there
        """
        cfg = self.configFile()
        if not os.path.isfile(cfg):
            logger.warning('No encfs config in %s. Skip backup of config file.' % cfg, self)
            return
        backup_folder = self.config.encfsconfigBackupFolder(self.profile_id)
        tools.makeDirs(backup_folder)
        old_backups = os.listdir(backup_folder)
        old_backups.sort(reverse = True)
        if len(old_backups):
            last_backup = os.path.join(backup_folder, old_backups[0])

            #don't create a new backup if config hasn't changed
            if tools.md5sum(cfg) == \
               tools.md5sum(last_backup):
                logger.debug('Encfs config did not change. Skip backup', self)
                return

        new_backup_file = '.'.join((os.path.basename(cfg), datetime.now().strftime('%Y%m%d%H%M')))
        new_backup = os.path.join(backup_folder, new_backup_file)
        logger.debug('Create backup of encfs config %s to %s'
                     %(cfg, new_backup), self)
        shutil.copy2(cfg, new_backup)
Example No. 32
def _execute( cmd, callback = None, user_data = None ):
    logger.debug("Call command \"%s\"" %cmd, traceDepth = 1)
    ret_val = 0

    if callback is None:
        ret_val = os.system( cmd )
    else:
        pipe = os.popen( cmd, 'r' )

        while True:
            line = temp_failure_retry( pipe.readline )
            if not line:
                break
            callback( line.strip(), user_data )

        ret_val = pipe.close()
        if ret_val is None:
            ret_val = 0

    if ret_val != 0:
        logger.warning("Command \"%s\" returns %s"
                       %(cmd, ret_val),
                       traceDepth = 1)
    else:
        logger.debug("Command \"%s...\" returns %s"
                     %(cmd[:min(16, len(cmd))], ret_val),
                     traceDepth = 1)

    return ret_val
Example No. 33
def filePush(fileList):
    """Pushes individual files to SFDC

    fileList - An array of file names to push
    """
    if len(fileList) == 0:
        logger.critical('No files listed to push')
        sys.exit(-1)

    file_list = ''

    for fname in fileList:
        file_path = os.path.join(os.path.expanduser(getRootDir()), fname)
        if os.path.exists(file_path):
            file_list = "%s%s%s" %(file_list, fname, os.pathsep,)
        else:
            logger.warning('Unable to find file "%s".  Skipping.' % (file_path,))

    if file_list != '':
        file_list = file_list[:-1]  # drop the trailing os.pathsep
        addFlag('%s=\'%s\'' % ('sf.files2push', file_list,))
        print getFlags()
        runAnt('file-push')
    else:
        logger.critical('Unable to find any files to push.')
        sys.exit(-1)
Example No. 34
def inhibitSuspend( app_id = sys.argv[0],
                    toplevel_xid = None,
                    reason = 'take snapshot',
                    flags = INHIBIT_SUSPENDING | INHIBIT_IDLE):
    """
    Prevent machine to go to suspend or hibernate.
    Returns the inhibit cookie which is used to end the inhibitor.
    """
    if not app_id:
        app_id = 'backintime'
    if not toplevel_xid:
        toplevel_xid = 0

    for dbus_props in INHIBIT_DBUS:
        try:
            #connect directly to the socket instead of dbus.SessionBus because
            #the dbus.SessionBus was initiated before we loaded the environ
            #variables and might not work
            if 'DBUS_SESSION_BUS_ADDRESS' in os.environ:
                bus = dbus.bus.BusConnection(os.environ['DBUS_SESSION_BUS_ADDRESS'])
            else:
                bus = dbus.SessionBus()
            interface = bus.get_object(dbus_props['service'], dbus_props['objectPath'])
            proxy = interface.get_dbus_method(dbus_props['methodSet'], dbus_props['interface'])
            cookie = proxy(*[ (app_id, dbus.UInt32(toplevel_xid), reason, dbus.UInt32(flags))[i] for i in dbus_props['arguments'] ])
            logger.info('Inhibit Suspend started. Reason: %s' % reason)
            return (cookie, bus, dbus_props)
        except dbus.exceptions.DBusException:
            pass
    if isRoot():
        logger.debug("Inhibit Suspend failed because BIT was started as root.")
        return
    logger.warning('Inhibit Suspend failed.')
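The returned cookie is what a matching release helper would hand back to the same D-Bus service; a hedged sketch (the 'methodUnSet' key is an assumption mirroring 'methodSet' above):

    def unInhibitSuspend(cookie, bus, dbus_props):
        # hand the cookie back to the service that issued it to end the inhibitor
        interface = bus.get_object(dbus_props['service'], dbus_props['objectPath'])
        proxy = interface.get_dbus_method(dbus_props['methodUnSet'], dbus_props['interface'])
        proxy(cookie)
        logger.info('Release inhibit Suspend')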
Example No. 35
    def delete_pool(self, batch_service_client: batch.BatchExtensionsClient):
        """
        Deletes the pool the if the pool, if the pool has already been deleted or marked for deletion it
        should ignore the batch exception that is thrown. These errors come up due to multiple jobs using the same pool
        and when a the job cleans up after it's self it will call delete on the same pool since they are a shared resource.

        :param batch_service_client: A Batch service client.
        :type batch_service_client: `azure.batch.BatchExtensionsClient`
        """
        logger.info("Deleting pool: {}.".format(self.pool_id))
        try:
            batch_service_client.pool.delete(self.pool_id)
        except batchmodels.batch_error.BatchErrorException as batch_exception:
            if utils.expected_exception(
                    batch_exception,
                    "The specified pool has been marked for deletion"):
                logger.warning(
                    "The specified pool [{}] has been marked for deletion.".
                    format(self.pool_id))
            elif utils.expected_exception(batch_exception,
                                          "The specified pool does not exist"):
                logger.warning(
                    "The specified pool [{}] has been deleted.".format(
                        self.pool_id))
            else:
                traceback.print_exc()
                utils.print_batch_exception(batch_exception)
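utils.expected_exception is not shown in these examples; a plausible sketch, assuming the Azure Batch error object exposes its text through error.message.value (an assumption about azure.batch BatchErrorException, not confirmed by the example):

    def expected_exception(batch_exception, message):
        # True if the batch error's message text starts with the expected prefix
        if (batch_exception.error and
                batch_exception.error.message and
                batch_exception.error.message.value):
            return batch_exception.error.message.value.startswith(message)
        return False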
Example No. 36
def start_app(app_name = 'backintime'):
    """
    Start the requested command or return config if there was no command
    in arguments.

    app_name:   string representing the current application
    """
    create_parsers(app_name)
    #open log
    logger.APP_NAME = app_name
    logger.openlog()

    #parse args
    args = arg_parse(None)

    #add source path to $PATH environ if running from source
    if tools.running_from_source():
        tools.add_source_to_path_environ()

    #warn about sudo
    if tools.usingSudo() and os.getenv('BIT_SUDO_WARNING_PRINTED', 'false') == 'false':
        os.putenv('BIT_SUDO_WARNING_PRINTED', 'true')
        logger.warning("It looks like you're using 'sudo' to start %(app)s. "
                       "This will cause some troubles. Please use either 'sudo -i %(app_name)s' "
                       "or 'pkexec %(app_name)s'."
                       %{'app_name': app_name, 'app': config.Config.APP_NAME})

    #call commands
    if 'func' in dir(args):
        args.func(args)
    else:
        setQuiet(args)
        printHeader()
        return getConfig(args, False)
Example No. 37
    def __init__( self ):
        self.snapshots = snapshots.Snapshots()
        self.config = self.snapshots.config
        self.decode = None

        if len(sys.argv) > 1:
            if not self.config.set_current_profile(sys.argv[1]):
                logger.warning("Failed to change Profile_ID %s"
                               % sys.argv[1], self)

        self.qapp = qt4tools.create_qapplication(self.config.APP_NAME)
        translator = qt4tools.get_translator()
        self.qapp.installTranslator(translator)
        self.qapp.setQuitOnLastWindowClosed(False)

        import icon
        self.icon = icon
        self.qapp.setWindowIcon(icon.BIT_LOGO)

        self.status_icon = QSystemTrayIcon(icon.BIT_LOGO)
        #self.status_icon.actionCollection().clear()
        self.contextMenu = QMenu()

        self.menuProfileName = self.contextMenu.addAction(_('Profile: "%s"') % self.config.get_profile_name())
        qt4tools.set_font_bold(self.menuProfileName)
        self.contextMenu.addSeparator()

        self.menuStatusMessage = self.contextMenu.addAction(_('Done'))
        self.menuProgress = self.contextMenu.addAction('')
        self.menuProgress.setVisible(False)
        self.contextMenu.addSeparator()

        self.btnDecode = self.contextMenu.addAction(icon.VIEW_SNAPSHOT_LOG, _('decode paths'))
        self.btnDecode.setCheckable(True)
        self.btnDecode.setVisible(self.config.get_snapshots_mode() == 'ssh_encfs')
        QObject.connect(self.btnDecode, SIGNAL('toggled(bool)'), self.onBtnDecode)

        self.openLog = self.contextMenu.addAction(icon.VIEW_LAST_LOG, _('View Last Log'))
        QObject.connect(self.openLog, SIGNAL('triggered()'), self.onOpenLog)
        self.startBIT = self.contextMenu.addAction(icon.BIT_LOGO, _('Start BackInTime'))
        QObject.connect(self.startBIT, SIGNAL('triggered()'), self.onStartBIT)
        self.status_icon.setContextMenu(self.contextMenu)

        self.pixmap = icon.BIT_LOGO.pixmap(24)
        self.progressBar = QProgressBar()
        self.progressBar.setMinimum(0)
        self.progressBar.setMaximum(100)
        self.progressBar.setValue(0)
        self.progressBar.setTextVisible(False)
        self.progressBar.resize(24, 6)
        self.progressBar.render(self.pixmap, sourceRegion = QRegion(0, -14, 24, 6), flags = QWidget.RenderFlags(QWidget.DrawChildren))

        self.first_error = self.config.is_notify_enabled()
        self.popup = None
        self.last_message = None

        self.timer = QTimer()
        QObject.connect( self.timer, SIGNAL('timeout()'), self.update_info )

        self.ppid = os.getppid()
Example No. 39
def rewrite_file(tmpl, mapping, fout=None):
    '''
    Replace text patterns in a text file.

    * tmpl: A text file.
    * mapping: A dictionary of pattern->replacement pairs.
    * fout: [optional] Write to this file rather than overwriting the original. 

    '''
    log.info('Rewriting file: %s' % tmpl)
    fout = fout or tmpl
    t = openfile(tmpl, 'rb')
    s = t.read()
    t.close()
    if '\0' in s:
        log.warning("%s seems to be a binary file. Ignoring it.", tmpl)
        return
    s, n = multireplacen(s, mapping)
    if n:
        write_string(s, fout)
        if n == 1:
            log.info("There was one change.")
        else:
            log.info("There were %s changes." % n)
    else:
        log.info("The specified pattern or patterns were not found in file '%s'. It was not rewritten." % tmpl)
Example No. 41
def main(settings):
    """
    Translates a source language file (or STDIN) into a target language file
    (or STDOUT).
    """
    # Start logging.
    level = logging.DEBUG if settings.verbose else logging.INFO
    logging.basicConfig(level=level, format='%(levelname)s: %(message)s')

    # Create the TensorFlow session.
    tf_config = tf.ConfigProto()
    tf_config.allow_soft_placement = True
    session = tf.Session(config=tf_config)

    # Load config file for each model.
    configs = []
    for model in settings.models:
        config = load_config_from_json_file(model)
        setattr(config, 'reload', model)
        configs.append(config)

    # Create the model graphs and restore their variables.
    logging.debug("Loading models\n")
    models = []

    # ============= 19/8/16 KP ============
    warning('='*20 + 'Model Config to Load')
    warning(settings.models)
    # =====================================

    for i, config in enumerate(configs):
        with tf.variable_scope("model%d" % i) as scope:
            if config.model_type == "transformer":
                model = TransformerModel(config)
            else:
                model = rnn_model.RNNModel(config)
            saver = model_loader.init_or_restore_variables(config, session,
                                                           ensemble_scope=scope)
            model.sampling_utils = SamplingUtils(settings)
            models.append(model)

    # ============= 19/8/16 KP ============
    model_summary()
    # =====================================

    # TODO Ensembling is currently only supported for RNNs, so if
    # TODO len(models) > 1 then check models are all rnn

    # Translate the source file.
    inference.translate_file(input_file=settings.input,
                             output_file=settings.output,
                             session=session,
                             models=models,
                             configs=configs,
                             beam_size=settings.beam_size,
                             nbest=settings.n_best,
                             minibatch_size=settings.minibatch_size,
                             maxibatch_size=settings.maxibatch_size,
                             normalization_alpha=settings.normalization_alpha)
Example No. 42
 def evaluate(self, meta, params, default=None, line=None):
     if isinstance(meta, self.base_type):
         return meta
     elif self.warning_only:
         logger.warning(line, 'Value (1) has to be of type (2): ', meta, self.base_type)
         return Commands.Remove
     else:
         return False
Example No. 43
    def get_title(self) -> str:
        elm = self.page.select_one(".c-article-title")

        if not elm:
            logger.warning(f"{self.url}のタイトルが取得できませんでした。")
            return ""

        return elm.get_text()
Example No. 44
 def __init__(self):
     log_dir = logger.get_logger_dir()
     if log_dir is None:
         logger.warning("Log directory unset, Please use logger.auto_set_dir() before")
         logger.auto_set_dir()
         log_dir = logger.get_logger_dir()
         logger.warning("logger dir:{}".format(log_dir))
     self.writer = tf.summary.FileWriter(log_dir)
Example No. 45
def process(pkt, label, t_start):
    ip = pkt.getlayer(IP)
    ether = pkt.getlayer(Ether)
    d.warning(pprint.pformat(pkt))
    stack = pkt.getlayer(TCP)
    #func.addData(ip.src, ip.dst, "tcp", label)

    func.getDetectionData(pkt, ip.src, ip.dst, "tcp", label, t_start)
Example No. 46
 def go(self):
     for parent, dirnames, filenames in os.walk(self.wpath):
         for dirname in dirnames:
             if os.path.exists(self.wpath+dirname+'/'+'task.xml'):
                 logger.info( '['+self.name+'] [NEWTASK] tfile_found: '+ dirname+'/'+'task.xml' )
                 self.taskqueue.put(dirname)
             else:
                 logger.warning( '['+self.name+'] [BADTASK] tfile_not_found: '+ dirname+'/'+'task.xml' )
Example No. 47
def get_response(url):
    try:
        return urllib2.urlopen(url)
    except (urllib2.HTTPError, urllib2.URLError):
        logger.warning('File: %s not found.' % url)
    except Exception as error:
        logger.error('Exception: %s\n%s' % (error, traceback.format_exc()))
    return None
Example No. 49
 def handle_login(self, code, parameters, event, message):
     if parameters == '3':
         self.send_command('005', config.ENVISALINKPASS)
     if parameters == '1':
         self.send_command('001', '')
     if parameters == '0':
         logger.warning('Incorrect envisalink password')
         sys.exit(0)
Example No. 50
 def check_terrible_worker(self):
     count = 0
     for worker in self.workers:
         if (worker.is_working()
                 and worker.is_doing_long_job(self.max_wait_time)):
             self.handle_terrible_worker(worker)
             count += 1
     logger.warning('%d workers timed out; the maximum expected time is %f' % (count, self.max_wait_time))
Example No. 51
 def put_job(self, job, block=True, timeout=10):
     # todo: type-check job to make sure it is a subclass of Job
     try:
         self._job_queue.put(job, block, timeout)
     except Queue.Full:
         logger.warning(
             'All workers are busy now, more workers are needed!')
         raise NoIdleWorkerException('There is no idle worker now!')
Example No. 52
 def fromhdf5(cls, *args, **kwargs):
     try:
         return cls._backend.__dict__[cls.__name__].fromhdf5(
             *args, **kwargs)
     except AttributeError as e:
         warning("op2 object %s does not implement fromhdf5 method" %
                 cls.__name__)
         raise e
Example No. 53
 def handle_login(self, code, parameters, event, message):
     if parameters == '3':
         self.send_command('005', config.ENVISALINKPASS)
     if parameters == '1':
         self.send_command('001')
     if parameters == '0':
         logger.warning('Incorrect envisalink password')
         sys.exit(0)
Example No. 54
    def _solve(self, A, x, b):
        self._set_parameters()
        # Set up the operator only if it has changed
        if not self.getOperators()[0] == A.handle:
            self.setOperators(A.handle)
            if self.parameters['pc_type'] == 'fieldsplit' and A.sparsity.shape != (1, 1):
                rows, cols = A.sparsity.shape
                ises = []
                nlocal_rows = 0
                for i in range(rows):
                    if i < cols:
                        nlocal_rows += A[i, i].sparsity.nrows * A[i, i].dims[0]
                offset = 0
                if MPI.comm.rank == 0:
                    MPI.comm.exscan(nlocal_rows)
                else:
                    offset = MPI.comm.exscan(nlocal_rows)
                for i in range(rows):
                    if i < cols:
                        nrows = A[i, i].sparsity.nrows * A[i, i].dims[0]
                        ises.append((str(i), PETSc.IS().createStride(nrows, first=offset, step=1)))
                        offset += nrows
                self.getPC().setFieldSplitIS(*ises)
        if self.parameters['plot_convergence']:
            self.reshist = []

            def monitor(ksp, its, norm):
                self.reshist.append(norm)
                debug("%3d KSP Residual norm %14.12e" % (its, norm))
            self.setMonitor(monitor)
        # Not using super here since the MRO would call base.Solver.solve
        with timed_region("PETSc Krylov solver"):
            with b.vec_ro as bv:
                with x.vec as xv:
                    PETSc.KSP.solve(self, bv, xv)
        if self.parameters['plot_convergence']:
            self.cancelMonitor()
            try:
                import pylab
                pylab.semilogy(self.reshist)
                pylab.title('Convergence history')
                pylab.xlabel('Iteration')
                pylab.ylabel('Residual norm')
                pylab.savefig('%sreshist_%04d.png' %
                              (self.parameters['plot_prefix'], self._count))
            except ImportError:
                warning("pylab not available, not plotting convergence history.")
        r = self.getConvergedReason()
        debug("Converged reason: %s" % self._reasons[r])
        debug("Iterations: %s" % self.getIterationNumber())
        debug("Residual norm: %s" % self.getResidualNorm())
        if r < 0:
            msg = "KSP Solver failed to converge in %d iterations: %s (Residual norm: %e)" \
                % (self.getIterationNumber(), self._reasons[r], self.getResidualNorm())
            if self.parameters['error_on_nonconvergence']:
                raise RuntimeError(msg)
            else:
                warning(msg)
Example No. 55
 def evaluate(self, meta, params, default=None, line=None):
     if isinstance(meta, basestring) and built_in_datatypes.is_built_in_datatype(meta):
         return meta
     elif isinstance(meta, basestring) and self.warning_only:
         logger.warning(line, 'Value is not a built in datatype: ', meta)
         return Commands.Remove
     else:
         # TODO logger.error(line, 'Value is not a built in datatype: ', meta)
         return False
Example No. 56
def _detect_openmp_flags():
    p = Popen(['mpicc', '--version'], stdout=PIPE, shell=False)
    _version, _ = p.communicate()
    if _version.find('Free Software Foundation') != -1:
        return '-fopenmp', 'gomp'
    elif _version.find('Intel Corporation') != -1:
        return '-openmp', 'iomp5'
    else:
        warning('Unknown mpicc version:\n%s' % _version)
        return '', ''
Example No. 57
 def evaluate(self, meta, params, default=None, line=None):
     result = NumberPattern()
     if isinstance(meta, basestring):
         logger.debug(line, 'Number pattern property: ', meta)
         result.value = meta
         return result
     else:
         # issue a warning
         logger.warning(line, 'value of number pattern property is not a string: ', meta)
         return result
Example No. 58
def translate_list(item_list, translation):
    for index, item in enumerate(item_list):
        if not isinstance(item, list):
            try:
                item_list[index] = configParser.get(translation, item)
            except ConfigParser.NoOptionError as e:
                item_list[index] = item
                logger.warning('%s\nNo translation for value: %s' % (e, item))
        else:
            translate_list(item, translation)
Example No. 59
    def convert(self, dict, need_break = True):
        self._replace_keys(dict)
        self._translate_keys(dict)
        self._remove_duplicate(dict)

        items = []
        for entry in ORDER_ENTRIES:
            if not entry:
                # empty string means a blank line for break
                if need_break:
                    items.append(('', ''))
                continue

            if entry in dict:
                items.append((entry, dict[entry]))
                del dict[entry]

        subpkgs = []
        try:
            subpkgs_list = dict['SubPackages']
            del dict['SubPackages']

            for sub_items in subpkgs_list:
                subpkgs.append(self.convert(sub_items, False))
        except KeyError:
            pass

        if 'extra' in dict:
            extra = dict['extra']
            del dict['extra']
        else:
            extra = {}

        for k, v in dict.iteritems():
            logger.warning('un-ordered entry: %s' % k)
            items.append((k, v))

        if extra:
            try:
                # clean up empty lines in %files
                files = [s.strip() for s in extra['Files'] if s.strip()]
                if files:
                    extra['Files'] = files
                else:
                    del extra['Files']
            except KeyError:
                pass

            if extra: # check it again
                items.append(('extra', extra))

        if subpkgs:
            items.append(('SubPackages', subpkgs))

        return items
Example No. 60
    def mount(self, mode = None, check = True, plugins = True, **kwargs):
        """
        High-level `mount`. Check if the selected ``mode`` need to be mounted,
        select the low-level backend and mount it.

        Args:
            mode (str):     mode to use. One of 'local', 'ssh', 'local_encfs' or
                            'ssh_encfs'
            check (bool):   if ``True`` run
                            :py:func:`MountControl.pre_mount_check` before
                            mounting
            plugins (bool): if ``True`` run
                            :py:func:`pluginmanager.PluginManager.do_mount`
                            before mount
            **kwargs:       keyword arguments passed to the low-level
                            :py:class:`MountControl` subclass backend

        Returns:
            str:            Hash ID used as mountpoint

        Raises:
            exceptions.MountException:
                            if a check failed
            exceptions.HashCollision:
                            if Hash ID was used before but umount info wasn't
                            identical
        """
        if plugins:
            self.config.PLUGIN_MANAGER.load_plugins(cfg = self.config)
            self.config.PLUGIN_MANAGER.do_mount()
        if mode is None:
            mode = self.config.get_snapshots_mode(self.profile_id)

        if self.config.SNAPSHOT_MODES[mode][0] is None:
            #mode doesn't need to mount
            return 'local'
        else:
            while True:
                try:
                    mounttools = self.config.SNAPSHOT_MODES[mode][0]
                    backend = mounttools(cfg = self.config,
                                         profile_id = self.profile_id,
                                         tmp_mount = self.tmp_mount,
                                         mode = mode,
                                         parent = self.parent,
                                         read_only = self.read_only,
                                         **kwargs)
                    return backend.mount(check = check)
                except HashCollision as ex:
                    logger.warning(str(ex), self)
                    del backend
                    check = False
                    continue
                break
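A hedged usage sketch of the high-level mount API described in the docstring above (the Mount class name and constructor arguments are assumptions drawn from the backend call, not confirmed by the example):

    mnt = Mount(cfg = config, profile_id = '1', tmp_mount = False,
                parent = None, read_only = True)
    hash_id = mnt.mount(mode = 'ssh_encfs', check = True)  # hash ID used as mountpoint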