def scan_file(file_name, subfile):
    # start analysis with basic info
    pe = pefile.PE(file_name)
    machine = pe.FILE_HEADER.Machine
    all_results = OrderedDict( [
        ('MD5',             get_hash(file_name, 'md5')),
        ('SHA1',            get_hash(file_name, 'sha1')),
        ('SHA256',          get_hash(file_name, 'sha256')),
        ('Type',            commands.getoutput('file %s' % file_name).split(file_name + ': ')[1]),
        ('Size',            (os.path.getsize(file_name))/1000),
        ('SSDeep',          get_ssdeep(file_name)),
        #('ImpHash',        pe.get_imphash()),
        ('Arch',            pefile.MACHINE_TYPE[machine]),
        ('Entry Point',     hex(pe.OPTIONAL_HEADER.AddressOfEntryPoint)),
        ('Compiled',        datetime.datetime.fromtimestamp(pe.FILE_HEADER.TimeDateStamp)),
        ('Start Address',   grep_saddress(file_name))
    ] )

    print '\n[ %s ]' % file_name
    for key, value in all_results.iteritems():
        if key == 'Compiled' or key == 'Entry Point' or key == 'Start Address':
            print '%s:\t\t%s' % (key, value)
        else:
            print '%s:\t\t\t%s' % (key, value)

    if subfile:
        print '\n'
        check_subfile(file_name)
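
# Hedged usage sketch (not part of the original snippet): the argparse flag
# names below are assumptions, and get_hash/get_ssdeep/grep_saddress/
# check_subfile must be provided by the original module. Python 2 style,
# to match the code above.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Print basic PE triage info.')
    parser.add_argument('files', nargs='+', help='PE files to scan')
    parser.add_argument('--subfile', action='store_true',
                        help='also check for embedded files')
    args = parser.parse_args()
    for path in args.files:
        scan_file(path, args.subfile)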
Example #2
 def __new__(cls, name, bases, attrs):
     attrs['prefix'] = attrs.get('prefix', 'form')
     attrs = OrderedDict(
             sorted([(k, v) for k, v in attrs.iteritems()],
                 cmp=lambda x, y: cmp(getattr(x[1], 'order_counter', None),
                                      getattr(y[1], 'order_counter', None))
                 )
             )
     return validators.DeclarativeMeta.__new__(cls, name, bases, attrs)
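
# Sketch of the order_counter idea this metaclass relies on (the Field class
# below is hypothetical; the real declarations come from the validators
# module). Each instance takes an increasing counter at definition time, so
# sorting class attributes by order_counter recovers their declaration order.
import itertools

class Field(object):
    _counter = itertools.count()

    def __init__(self):
        self.order_counter = next(self._counter)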
Example #3
def error_list_from_form(form, prefix_with_fields=True):
    results = []
    errors = OrderedDict(form.errors)
    for field, messages in errors.iteritems():
        for message in messages:
            entry = OrderedDict()
            if prefix_with_fields and field != '__all__':
                message = '%s: %s' % (field, message)
            entry['message'] = unicode(message)
            results.append(entry)
    return results
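
# Hedged usage sketch: a stand-in object with a Django-style .errors mapping
# (the FakeForm class and its messages are assumptions, not part of the
# original code). Python 2, to match the unicode() call above.
class FakeForm(object):
    errors = OrderedDict([('email', ['Enter a valid email address.']),
                          ('__all__', ['Passwords do not match.'])])

for entry in error_list_from_form(FakeForm()):
    print(entry['message'])
# email: Enter a valid email address.
# Passwords do not match.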
def sliceDocumentation(fileIn, outputDirectory):
	"""
	This definition slices the given documentation file.

	:param fileIn: File to slice. ( String )
	:param outputDirectory: Output directory. ( String )
	"""

	LOGGER.info("{0} | Slicing '{1}' file!".format(sliceDocumentation.__name__, fileIn))
	file = File(fileIn)
	file.cache()

	slices = OrderedDict()
	for i, line in enumerate(file.content):
		search = re.search(r"^\.\. \.(\w+)", line)
		if search:
			slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

	index = 0
	for slice, sliceStart in slices.iteritems():
		sliceFile = File(os.path.join(outputDirectory, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
		LOGGER.info("{0} | Outputing '{1}' file!".format(sliceDocumentation.__name__, sliceFile.path))
		sliceEnd = index < (len(slices.values()) - 1) and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or \
		len(file.content)

		for i in range(sliceStart, sliceEnd):
			skipLine = False
			for item in CONTENT_DELETION:
				if re.search(item, file.content[i]):
					LOGGER.info("{0} | Skipping Line '{1}' with '{2}' content!".format(sliceDocumentation.__name__,
																						i,
																						item))
					skipLine = True
					break

			if skipLine:
				continue

			line = file.content[i]
			for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
				line = re.sub(pattern, value, line)

			search = re.search(r"-  `[\w ]+`_ \(([\w\.]+)\)", line)
			if search:
				LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(sliceDocumentation.__name__,
																			i,
																			search.groups()[0]))
				line = "-  :ref:`{0}`\n".format(search.groups()[0])
			sliceFile.content.append(line)

		sliceFile.write()
		index += 1
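
# Hedged usage sketch (the paths are hypothetical; File, SLICE_ATTRIBUTE_INDENT,
# OUTPUT_FILES_EXTENSION, CONTENT_DELETION and CONTENT_SUBSTITUTIONS are
# module-level names from the original script). Every ".. .SliceName" marker in
# the input file becomes its own output file named after that marker.
if __name__ == "__main__":
    sliceDocumentation("docs/Api.rst", "docs/slices")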
Example #5
def generate(output_json, output_path):
    """
    Generate metadata for boxes (sysadmin tool)
    """
    boxes = baseboxes()
    semver = re.compile(r'\d+\.\d+\.\d+')

    metadata = OrderedDict()
    for infra in boxes:
        for box in boxes[infra]:
            box, version = box[:box.rfind('-')], box[box.rfind('-') + 1:-4]
            basebox = '/'.join([infra, box])

            if not semver.match(version):
                continue

            _generate_meta(metadata, basebox, box, version)

    # if the path is empty, default to json
    if output_json or not output_path:
        json.dump(metadata, sys.stdout)
        return

    if output_path:
        if not os.path.exists(output_path):
            os.mkdir(output_path)

    # create the proper folder architecture for vagrant
    for basebox, data in metadata.iteritems():
        infra, name = basebox.split('/')

        infra_dir = os.path.join(output_path, infra)
        if not os.path.exists(infra_dir):
            os.mkdir(infra_dir)

        box_file = os.path.join(infra_dir, name)
        with open(box_file, 'w') as outfile:
            json.dump(data, outfile)

    click.secho(
        'The metadata files have been saved in the %s directory.\n'
        'You can upload them to s3 with one of the following commands:\n'
        % output_path,
        fg='green')

    click.echo('s3cmd put --recursive %s -m'
               '\"application/json\" s3://%s/'
               % (output_path, basebox_bucket()))
    click.echo('aws s3 sync --content-type \"application/json\" '
               '%s s3://%s/meta/'
               % (output_path, basebox_bucket()))
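
# Sketch of the basebox filename split used above, on a hypothetical name:
# everything before the last "-" is the box name, and the version is the
# remainder minus the 4-character ".box" extension.
box = "trusty64-1.2.3.box"
name, version = box[:box.rfind('-')], box[box.rfind('-') + 1:-4]
print(name + " " + version)   # trusty64 1.2.3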
 def add_folders(self, folders, sorted_by_values=True):
     """Add many folders to a menu. Can be sorted by folder name."""
     translations = {}
     for id in folders:
         value = self.xbmcaddon.translate(id)
         translations.update({id: value})
     if sorted_by_values:  # Default.
         # IMPORTANT! This is the way suggested by Python 2.7, but it seems
         # to have problems when the first letter is accented.
         translations = \
             OrderedDict(sorted(translations.items(), key=lambda t: t[1]))
         for id, folder in translations.iteritems():
             self._add_folder(id, folder, folders[id])
     else:  # Sorted by key.
         for id, folder in folders.iteritems():
             self._add_folder(id, translations[id], folder)
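
# Sketch of the value-sorted OrderedDict pattern used above, on plain data.
# (For accented labels -- the caveat in the comment above -- a locale-aware
# sort key such as locale.strxfrm could be substituted, assuming the process
# locale is configured.)
labels = {'b': 'Banana', 'a': 'Cherry', 'c': 'Apple'}
ordered = OrderedDict(sorted(labels.items(), key=lambda t: t[1]))
print(list(ordered))   # ['c', 'b', 'a']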
Example #7
def get_result_details(fields):
    rowhtml = ''
    
    row_dict = {}

    for field, value in fields.items():
        if field.startswith('prop_') and not field.endswith('_exact'):
            prop_num = field[5:].strip()
            
            #sloppy (hopefully temporary) fix for Haystack auto-converting BM Reg #s to lists of ints
            if prop_num == '33':
                long_id = fields.get('id')
                id_group = long_id.split('.')
                id = id_group[2]
                vals = SubjectProperty.objects.filter(property_id=prop_num, subject_id=id)
                for i, v in enumerate(vals):
                    if i > 0:
                        value = value + '; ' + v.property_value
                    else: 
                        value = v.property_value                
            try:
                prop = DescriptiveProperty.objects.get(id=prop_num)
                prop_order = prop.order
                try:
                    row = '<tr><td>' + prop.property + '</td><td>' + value + '</td></tr>'
                except TypeError:
                    row = '<tr><td>' + prop.property + '</td><td>' + str(value) + '</td></tr>'
                
                #sloppy way of handling properties with same order number
                success = False
                while not success:
                    if prop_order in row_dict:
                        prop_order += 1
                    else:
                        success = True
                row_dict[prop_order] = row
                
            except DescriptiveProperty.DoesNotExist:
                continue

    ordered_dict = OrderedDict(sorted(row_dict.items()))
    
    for k, v in ordered_dict.iteritems():
        rowhtml += v
    
    return rowhtml
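
# Sketch of the collision handling used above: if an order number is already
# taken, bump to the next free slot, then emit rows sorted by the final keys.
row_dict = {}
for order, row in [(1, '<tr>A</tr>'), (1, '<tr>B</tr>'), (2, '<tr>C</tr>')]:
    while order in row_dict:
        order += 1
    row_dict[order] = row
print(''.join(OrderedDict(sorted(row_dict.items())).values()))
# <tr>A</tr><tr>B</tr><tr>C</tr>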
Example #8
 def write_C_function_if(self, func, returntype, strings):
     if not strings:
         raise ValueError("%s: No strings" % (self.__class__.__name__))
     types = {
         "vector"  : "std::vector<std::string>",
         "vectorF" : "std::vector<float>",
         "map"     : "std::map<std::string, std::string>",
         "mapF"    : "std::map<std::string, float>",
         }
     print "%s %s(const std::string id) {" % (types[returntype], func)
     print "    %s values;" % (types[returntype])
     strings_if = OrderedDict()
     for (k, v) in strings:
         if not " := " in v:
             raise ValueError("%s: A label is expected: %s" % (self.__class__.__name__, k))
         kk = v.strip("\"").split(" := ")[0].split("_")[0]
         if kk in strings_if:
             strings_if[kk].append((len(strings_if[kk]), v))
         else:
             strings_if[kk] = [(0, v)]
     print_if = True
     for (kk, strings) in strings_if.iteritems():
         if print_if:
             print "    if (id == \"%s\") {" % (kk)
             print_if = False
         else:
             print "    else if (id == \"%s\") {" % (kk)
         if returntype == "vector":
             print "        values.resize(%i, \"\");" % (len(strings))
         elif returntype == "vectorF":
             print "        values.resize(%i, -999.);" % (len(strings))
         for (k, v) in strings:
             if v.startswith("\"" + kk + " := "):
                 v = v.replace(kk + " := ", "")
             if returntype in ["map", "mapF"]:
                 print "        values[{k:18s}] = {v};".format(k=("\""+k+"\""), v=v)
             elif returntype in ["vector", "vectorF"]:
                 print "        values[{k}] = {v};".format(k=k, v=v)
         print "        return values;"
         print "    }"
     print "    return values;"
     print "}"
     print
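
# Sketch of the grouping rule used above (hypothetical labels): entries are
# bucketed by the text before the first "_" of the label preceding " := ",
# and each bucket becomes one "if (id == ...)" branch in the emitted C++.
entries = [("k0", '"scale := 1.0"'), ("k1", '"scale_up := 1.1"'), ("k2", '"res := 0.5"')]
buckets = OrderedDict()
for k, v in entries:
    kk = v.strip('"').split(" := ")[0].split("_")[0]
    buckets.setdefault(kk, []).append(k)
print(list(buckets.items()))   # [('scale', ['k0', 'k1']), ('res', ['k2'])]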
Example #9
class expirationset:
    def __init__(self, retention_secs, callback=None):
        self._retention_secs = retention_secs
        self._data = OrderedDict()
        self._callback = callback

    def add(self, key):
        self.cleanup_stale()
        if(key in self._data):
            del self._data[key]
        self._data[key] = time()

    def discard(self, key):
        if(key in self._data):
            del self._data[key]
            if(self._callback is not None):
                self._callback(key, False)

    def __contains__(self, key):
        self.cleanup_stale()
        return key in self._data

    def cleanup_stale(self):
        curtime = time()
        while(True):
            try:
                key, regtime = self._data.iteritems().next()
            except StopIteration:
                break

            if(curtime-regtime < self._retention_secs):
                break
            
            del self._data[key]
            if(self._callback is not None):
                self._callback(key, True)
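
# Hedged usage sketch: keys silently age out of the set once they are older
# than retention_secs; the optional callback is told about explicit discards
# (second argument False) and expirations (second argument True).
from time import sleep

def on_change(key, expired):
    print("%s expired=%s" % (key, expired))

seen = expirationset(1, callback=on_change)
seen.add('job-42')
print('job-42' in seen)    # True
sleep(1.5)
print('job-42' in seen)    # False; on_change was called with ('job-42', True)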
Example #10
 def check_unique_columns(self):
     """Check columns which should contain unique values
     actually do."""
     for colhead in self.UNIQUES:
         col = self.HEADINGS.index(colhead)
         rowsdata = [(i + self.HEADING_ROW, c.value) for i, c in \
                 enumerate(self.sheet.col_slice(
                     col, self.HEADING_ROW, self.sheet.nrows))]
         datarows = OrderedDict()
         for i, key in rowsdata:
             item = datarows.get(key)
             # don't count empty fields
             if key is None or key == "":
                 continue
             if item is None:
                 datarows[key] = [i]
             else:
                 datarows[key].append(i)
         for key, rows in datarows.iteritems():
             if len(rows) > 1:
                 header = self.sheet.cell(self.HEADING_ROW, col).value
                 self.add_error(
                         rows[0], "Duplicate on unique column: %s: '%s' %s" % (
                             header, key, [r+1 for r in rows[1:]]))
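
# Sketch of the duplicate-grouping pattern above on plain data: rows are
# bucketed by cell value in first-seen order, and any bucket holding more
# than one row number is a duplicate worth reporting.
rows = [(2, 'A-001'), (3, 'A-002'), (4, 'A-001'), (5, '')]
groups = OrderedDict()
for row, value in rows:
    if not value:
        continue    # skip empty cells, as above
    groups.setdefault(value, []).append(row)
print([(v, r) for v, r in groups.items() if len(r) > 1])   # [('A-001', [2, 4])]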
class FiniteDifference(Container):
    """ Differentiates a driver's workflow using the Finite Difference with
    Analytical Derivatives (FDAD) method. A variety of difference types are
    available for both first and second order."""

    implements(IDifferentiator)
    
    # pylint: disable-msg=E1101
    form = Enum("central", ["central", "forward", "backward"], iotype='in', \
                desc="Finite difference form (central, forward, backward).")
    
    default_stepsize = Float(1.0e-6, iotype='in', desc='Default finite ' + \
                             'difference step size.')
    
    def __init__(self):
        
        super(FiniteDifference, self).__init__()
        
        # This gets set in the callback
        self._parent = None
        
        self.param_names = []
        self.objective_names = []
        self.eqconst_names = []
        self.ineqconst_names = []
        
        self.gradient_case = OrderedDict()
        self.gradient = {}
        
        self.hessian_ondiag_case = OrderedDict()
        self.hessian_offdiag_case = OrderedDict()
        self.hessian = {}
        
    def setup(self):
        """Sets some dimensions."""

        self.param_names = self._parent.get_parameters().keys()
        self.objective_names = self._parent.get_objectives().keys()
        
        try:
            self.ineqconst_names = self._parent.get_ineq_constraints().keys()
        except AttributeError:
            self.ineqconst_names = []
        try:
            self.eqconst_names = self._parent.get_eq_constraints().keys()
        except AttributeError:
            self.eqconst_names = []
        
        
    def get_derivative(self, output_name, wrt):
        """Returns the derivative of output_name with respect to wrt.
        
        output_name: string
            Name of the output in the local OpenMDAO hierarchy.
            
        wrt: string
            Name of the input in the local OpenMDAO hierarchy. The
            derivative is with respect to this variable.
        """
        
        return self.gradient[wrt][output_name]

    
    def get_2nd_derivative(self, output_name, wrt):
        """Returns the 2nd derivative of output_name with respect to both vars
        in the tuple wrt.
        
        output_name: string
            Name of the output in the local OpenMDAO hierarchy.
            
        wrt: tuple containing two strings
            Names of the inputs in the local OpenMDAO hierarchy. The
            derivative is with respect to these 2 variables.
        """
        
        return self.hessian[wrt[0]][wrt[1]][output_name]

    
    def get_gradient(self, output_name=None):
        """Returns the gradient of the given output with respect to all 
        parameters.
        
        output_name: string
            Name of the output in the local OpenMDAO hierarchy.
        """
        
        return array([self.gradient[wrt][output_name] for wrt in self.param_names])
        
        
    def get_Hessian(self, output_name=None):
        """Returns the Hessian matrix of the given output with respect to
        all parameters.
        
        output_name: string
            Name of the output in the local OpenMDAO hierarchy.
        """       
                

        #return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])
        return array([self.hessian[in1][in2][output_name] for (in1,in2) in product(self.param_names, self.param_names)])


    def calc_gradient(self):
        """Calculates the gradient vectors for all outputs in this Driver's
        workflow."""
        
        # Each component runs its calc_derivatives method.
        # We used to do this in the driver instead, but we've moved it in
        # here to make the interface more uniform.
        self._parent.calc_derivatives(first=True)
        
        self.setup()

        # Create our 2D dictionary the first time we execute.
        if not self.gradient:
            for name in self.param_names:
                self.gradient[name] = {}
                
        # Pull initial state and stepsizes from driver's parameters
        base_param = OrderedDict()
        stepsize = {}
        for key, item in self._parent.get_parameters().iteritems():
            base_param[key] = item.evaluate()
            
            if item.fd_step:
                stepsize[key] = item.fd_step
            else:
                stepsize[key] = self.default_stepsize

        # For Forward or Backward diff, we want to save the baseline
        # objective and constraints. These are also needed for the
        # on-diagonal Hessian terms, so we will save them in the class
        # later.
        base_data = self._run_point(base_param)
        
        # Set up problem based on Finite Difference type
        if self.form == 'central':
            deltas = [1, -1]
            func = diff_1st_central
        elif self.form == 'forward':
            deltas = [1, 0]
            func = diff_1st_fwrdbwrd
        else:
            deltas = [0, -1]
            func = diff_1st_fwrdbwrd

        self.gradient_case = OrderedDict()

        # Assemble input data
        for param in self.param_names:
            
            pcase = []
            for j_step, delta in enumerate(deltas):
                
                case = base_param.copy()
                case[param] += delta*stepsize[param]
                pcase.append({ 'param': case })
                
            self.gradient_case[param] = pcase
            
        # Run all "cases".
        # TODO - Integrate OpenMDAO's concurrent processing capability once it
        # is formalized. This operation is inherently parallelizable.
        for key, case in self.gradient_case.iteritems():
            for ipcase, pcase in enumerate(case):
                if deltas[ipcase]:
                    pcase['data'] = self._run_point(pcase['param'])
                else:
                    pcase['data'] = base_data
                
        
        # Calculate gradients
        for key, case in self.gradient_case.iteritems():
            
            eps = stepsize[key]
            
            for name in list(self.objective_names + \
                             self.eqconst_names + \
                             self.ineqconst_names):
                self.gradient[key][name] = \
                    func(case[0]['data'][name],
                         case[1]['data'][name], eps)

        # Save these for Hessian calculation
        self.base_param = base_param
        self.base_data = base_data

        
    def calc_hessian(self, reuse_first=False):
        """Returns the Hessian matrix for all outputs in the Driver's
        workflow.
        
        reuse_first: bool
            Switch to reuse some data from the gradient calculation so that
            we don't have to re-run some points we already ran (namely the
            baseline, +eps, and -eps cases.) Obviously you do this when the
            driver needs gradient and hessian information at the same point,
            and calls calc_gradient before calc_hessian.
        """
        
        # Each component runs its calc_derivatives method.
        # We used to do this in the driver instead, but we've moved it in
        # here to make the interface more uniform.
        self._parent.calc_derivatives(second=True)
        
        self.setup()
        
        # Create our 3D dictionary the first time we execute.
        if not self.hessian:
            for name1 in self.param_names:
                self.hessian[name1] = {}
                for name2 in self.param_names:
                    self.hessian[name1][name2] = {}
                
        self.hessian_ondiag_case = OrderedDict()
        self.hessian_offdiag_case = OrderedDict()

        # Pull stepsizes from driver's parameters
        base_param = OrderedDict()
        stepsize = {}
        for key, item in self._parent.get_parameters().iteritems():
            
            if item.fd_step:
                stepsize[key] = item.fd_step
            else:
                stepsize[key] = self.default_stepsize

        # Diagonal terms in Hessian always need base point
        # Usually, we will have saved this when we calculated
        # the gradient.
        if reuse_first:
            base_param = self.base_param
            base_data = self.base_data
        else:
            # Pull initial state from driver's parameters
            for key, item in self._parent.get_parameters().iteritems():
                base_param[key] = item.evaluate()
                    
            base_data = self._run_point(base_param)
            
        # Assemble input data
        # Cases : ondiag [fp, fm]
        deltas = [1, -1]
        for param in self.param_names:
            
            pcase = []
            for j_step, delta in enumerate(deltas):
                
                case = base_param.copy()
                case[param] += delta*stepsize[param]
                pcase.append({ 'param': case })
                
            self.hessian_ondiag_case[param] = pcase
            
        # Assemble input data
        # Cases : offdiag [fpp, fpm, fmp, fmm]
        deltas = [[1, 1],
                  [1, -1],
                  [-1, 1],
                  [-1, -1]]
        for i, param1 in enumerate(self.param_names):
            
            offdiag = {}
            for param2 in self.param_names[i+1:]:
            
                pcase = []
                for delta in deltas:
                    
                    case = base_param.copy()
                    case[param1] += delta[0]*stepsize[param1]
                    case[param2] += delta[1]*stepsize[param2]
                    pcase.append({ 'param': case })
                offdiag[param2] = pcase
                    
            self.hessian_offdiag_case[param1] = offdiag
            
        # Run all "cases".
        # TODO - Integrate OpenMDAO's concurrent processing capability once it
        # is formalized. This operation is inherently parallelizable.
        
        # We don't need to re-run on-diag cases if the gradients were
        # calculated with Central Difference.
        if reuse_first and self.form=='central':
            for key, case in self.hessian_ondiag_case.iteritems():
                
                gradient_case = self.gradient_case[key]
                for ipcase, pcase in enumerate(case):
                    
                    gradient_ipcase = gradient_case[ipcase]
                    pcase['data'] = gradient_ipcase['data'] 
        else:
            for case in self.hessian_ondiag_case.values():
                for pcase in case:
                    data = self._run_point(pcase['param'])
                    pcase['data'] = data

        # Off-diag cases must always be run.
        for cases in self.hessian_offdiag_case.values():
            for case in cases.values():
                for pcase in case:
                    pcase['data'] = self._run_point(pcase['param'])

                    
        # Calculate Hessians - On Diagonal
        for key, case in self.hessian_ondiag_case.iteritems():
            
            eps = stepsize[key]
            
            for name in list(self.objective_names + \
                             self.eqconst_names + \
                             self.ineqconst_names):
                self.hessian[key][key][name] = \
                    diff_2nd_xx(case[0]['data'][name],
                                base_data[name],
                                case[1]['data'][name], eps)
                
        # Calculate Hessians - Off Diagonal
        for key1, cases in self.hessian_offdiag_case.iteritems():
            
            eps1 = stepsize[key1]
            for key2, case in cases.iteritems():
                
                eps2 = stepsize[key2]
                
                for name in list(self.objective_names + \
                                 self.eqconst_names + \
                                 self.ineqconst_names):
                    self.hessian[key1][key2][name] = \
                        diff_2nd_xy(case[0]['data'][name],
                                    case[1]['data'][name],
                                    case[2]['data'][name],
                                    case[3]['data'][name],
                                    eps1, eps2)
                    
                    # Symmetry
                    # (Should ponder whether we should even store it.)
                    self.hessian[key2][key1][name] = \
                        self.hessian[key1][key2][name]
                    
    
    def _run_point(self, data_param):
        """Runs the model at a single point and captures the results. Note that 
        some differences require the baseline point."""

        dvals = [float(val) for val in data_param.values()]
        
        self._parent.set_parameters(dvals)

        # Run the model
        super(type(self._parent), self._parent).run_iteration()
        
        data = {}

        # Get Objectives
        for key, item in self._parent.get_objectives().iteritems():
            data[key] = item.evaluate(self._parent.parent)

        # Get Inequality Constraints
        if self.ineqconst_names:
            for key, item in self._parent.get_ineq_constraints().iteritems():
                val = item.evaluate(self._parent.parent)
                if '>' in val[2]:
                    data[key] = val[1]-val[0]
                else:
                    data[key] = val[0]-val[1]
        
        # Get Equality Constraints
        if self.eqconst_names:
            for key, item in self._parent.get_eq_constraints().iteritems():
                val = item.evaluate(self._parent.parent)
                if '>' in val[2]:
                    data[key] = val[1]-val[0]
                else:
                    data[key] = val[0]-val[1]
        
        return data
                    

    def reset_state(self):
        """Finite Difference does not leave the model in a clean state. If you
        require one, then run this method."""
        
        dvals = [float(val) for val in self.base_param.values()]
        self._parent.set_parameters(dvals)
        super(type(self._parent), self._parent).run_iteration()

        
    def raise_exception(self, msg, exception_class=Exception):
        """Raise an exception."""
        name = find_name(self._parent, self)
        self._parent.raise_exception("%s: %s" % (name,msg), exception_class)
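
# The class above relies on finite-difference helpers that are not shown in
# this snippet. A hedged sketch of the standard stencils they presumably
# implement (the names match the calls above; the bodies are the textbook
# formulas, not necessarily the original implementations):
def diff_1st_central(fp, fm, eps):
    # (f(x + eps) - f(x - eps)) / (2 * eps)
    return (fp - fm) / (2.0 * eps)

def diff_1st_fwrdbwrd(fp, fm, eps):
    # forward: (f(x + eps) - f(x)) / eps; backward: (f(x) - f(x - eps)) / eps
    return (fp - fm) / eps

def diff_2nd_xx(fp, f0, fm, eps):
    # second derivative along a single parameter
    return (fp - 2.0 * f0 + fm) / (eps ** 2)

def diff_2nd_xy(fpp, fpm, fmp, fmm, eps1, eps2):
    # mixed second derivative from the four corner evaluations
    return (fpp - fpm - fmp + fmm) / (4.0 * eps1 * eps2)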
class FileSystemPickleArtifact(Artifact):
    # Metadata
    def meta_filename(self):
        return "%s-meta.pickle" % (self.hashstring)

    def meta_filepath(self):
        return os.path.join(self.artifacts_dir, self.meta_filename())

    def save_meta(self):
        m = {}
        attrs_to_persist = set(self.META_ATTRS + self.HASH_WHITELIST) - set(['input_data_dict', 'inputs'])
        for a in attrs_to_persist:
            if hasattr(self, a):
                v = getattr(self, a)
                m[a] = v

        m['inputs'] = {}
        for k, a in self.inputs().iteritems():
            a.save()
            m['inputs'][k] = a.hashstring

        f = open(self.meta_filepath(), "w")
        pickle.dump(m, f)
        f.close()

    def load_meta(self):
        f = open(self.meta_filepath(), "r")
        m = pickle.load(f)
        f.close()

        self._inputs = dict((k, self.__class__.retrieve(h)) for (k, h) in m.pop('inputs').iteritems())

        for k, v in m.iteritems():
            setattr(self, k, v)

        # We only store filter name, not filter class, need to retrieve class from name
        if hasattr(self, "filter_name") and not hasattr(self, "filter_class"):
            self.filter_class = [k for n,k in self.FILTERS.iteritems() if k.__name__ == self.filter_name][0]

    # Input
    def load_input(self):
        """Load input data into memory, if applicable."""
        if self.binary_input:
            #not loading non-binary input
            pass
        elif self.initial:
            #initial artifact has no input
            pass
        elif self.additional:
            #additional artifact has no input
            pass
        elif len(self.input_data_dict) > 0:
            #we already have input data in memory
            pass
        elif not hasattr(self, 'previous_cached_output_filepath'):
            #no previous cached output, can't load
            pass
        else:
            f = open(self.previous_cached_output_filepath, "rb")
            data_dict = pickle.load(f)
            f.close()

            self.input_data_dict = OrderedDict() # maybe unnecessary
            for x in sorted(data_dict.keys()):
                k = x.split(":", 1)[1]
                self.input_data_dict[k] = data_dict[x]

    # Output
    def cached_output_filename(self):
        return "%s-output.pickle" % (self.hashstring)

    def cached_output_filepath(self):
        return os.path.join(self.artifacts_dir, self.cached_output_filename())

    def is_output_cached(self):
        # TODO add checksums to verify data hasn't changed
        if self.binary_output:
            return self.is_canonical_output_cached()
        else:
            return self.is_pickle_output_cached() and self.is_canonical_output_cached()

    def is_pickle_output_cached(self):
        fp = self.cached_output_filepath()
        return os.path.isfile(fp) and (os.path.getsize(fp) > 0)

    def is_canonical_output_cached(self):
        fp = self.filepath()
        return os.path.isfile(fp) and (os.path.getsize(fp) > 0)

    def save_output(self):
        if not self.is_complete():
            raise Exception("should not be calling save_output unless artifact is complete")

        if not self.data_dict or len(self.data_dict) == 0:
            # Our filter has written directly to an output file
            # We need to load this into memory first
            self.data_dict = OrderedDict()
            f = open(self.filepath(), 'r')
            data = f.read()
            f.close()
            self.data_dict['1'] = data

        # need to preserve ordering but we can't serialize OrderedDict
        # using JSON, so add sortable numbers to keys to preserve order
        data_dict = {}
        MAX = 10000
        if len(self.data_dict) >= MAX:
            raise Exception("""There is an arbitrary limit of %s dict items,
                           you can increase this if you need to.""" % MAX)
        i = -1
        for k, v in self.data_dict.iteritems():
            i += 1
            data_dict["%04d:%s" % (i, k)] = v

        # Write the cached output (pickle) file.
        f = open(self.cached_output_filepath(), "w")
        pickle.dump(data_dict, f)
        f.close()

        # Write the canonical file.
        f = open(self.filepath(), 'w')
        f.write(self.output_text())
        f.close()

    def load_output(self):
        if not self.is_complete():
            raise Exception("should not be calling load_output unless artifact is complete")

        f = open(self.cached_output_filepath(), "r")
        data_dict = pickle.load(f)
        f.close()

        self.data_dict = OrderedDict() # maybe unnecessary
        for x in sorted(data_dict.keys()):
            k = x.split(":", 1)[1]
            self.data_dict[k] = data_dict[x]
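
# Sketch of the "%04d:%s" key-prefix round trip used by save_output and
# load_output above: the numeric prefix keeps ordering stable through a plain
# dict, and loading strips everything up to the first ":".
src = OrderedDict([('header', 'h'), ('body', 'b')])
flat = dict(("%04d:%s" % (i, k), v) for i, (k, v) in enumerate(src.items()))
restored = OrderedDict()
for x in sorted(flat.keys()):
    restored[x.split(":", 1)[1]] = flat[x]
print(list(restored.items()))   # [('header', 'h'), ('body', 'b')]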
class SectionsFileParser(foundations.io.File):
	"""
	Defines methods to parse sections file format files.
	An alternative configuration file parser is available directly in Python: :class:`ConfigParser.ConfigParser`.

	The parser provided by this class has some major differences from Python's :class:`ConfigParser.ConfigParser`:

		- | Sections and attributes are stored in their appearance order by default.
			( Using Python :class:`collections.OrderedDict` )
		- | A default section ( **_defaults** ) stores orphan attributes
			( Attributes appearing before any declared section ).
		- File comments are stored inside the :obj:`SectionsFileParser.comments` class property.
		- | Sections, attributes and values are whitespace-stripped by default
			but can also be stored with their leading and trailing whitespaces.
		- | Values have their quotation markers stripped by default
			but can also be stored with their leading and trailing quotation markers.
		- Attributes are namespaced by default, allowing sections to merge without key collisions.

	"""

	def __init__(self,
				 file=None,
				 splitters=("=", ":"),
				 namespaceSplitter="|",
				 commentLimiters=(";", "#"),
				 commentMarker="#",
				 quotationMarkers=("\"", "'", "`"),
				 rawSectionContentIdentifier="__raw__",
				 defaultsSection="_defaults",
				 preserveOrder=True):
		"""
		Initializes the class.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse(stripComments=False)
			<foundations.parsers.SectionsFileParser object at 0x293892011>
			>>> sectionsFileParser.sections.keys()
			[u'Section A', u'Section B']
			>>> sectionsFileParser.comments
			OrderedDict([(u'Section A|#0', {u'content': u'Comment.', u'id': 0})])

		:param file: Current file path.
		:type file: unicode
		:param splitters: Splitter characters.
		:type splitters: tuple or list
		:param namespaceSplitter: Namespace splitter character.
		:type namespaceSplitter: unicode
		:param commentLimiters: Comment limiters characters.
		:type commentLimiters: tuple or list
		:param commentMarker: Character used to prefix extracted comment identifiers.
		:type commentMarker: unicode
		:param quotationMarkers: Quotation markers characters.
		:type quotationMarkers: tuple or list
		:param rawSectionContentIdentifier: Raw section content identifier.
		:type rawSectionContentIdentifier: unicode
		:param defaultsSection: Default section name.
		:type defaultsSection: unicode
		:param preserveOrder: Data order is preserved.
		:type preserveOrder: bool
		"""

		LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

		foundations.io.File.__init__(self, file)

		# --- Setting class attributes. ---
		self.__splitters = None
		self.splitters = splitters
		self.__namespaceSplitter = None
		self.namespaceSplitter = namespaceSplitter
		self.__commentLimiters = None
		self.commentLimiters = commentLimiters
		self.__commentMarker = None
		self.commentMarker = commentMarker
		self.__quotationMarkers = None
		self.quotationMarkers = quotationMarkers
		self.__rawSectionContentIdentifier = None
		self.rawSectionContentIdentifier = rawSectionContentIdentifier
		self.__defaultsSection = None
		self.defaultsSection = defaultsSection
		self.__preserveOrder = None
		self.preserveOrder = preserveOrder

		if not preserveOrder:
			self.__sections = {}
			self.__comments = {}
		else:
			self.__sections = OrderedDict()
			self.__comments = OrderedDict()
		self.__parsingErrors = []

	#******************************************************************************************************************
	#***	Attributes properties.
	#******************************************************************************************************************
	@property
	def splitters(self):
		"""
		Property for **self.__splitters** attribute.

		:return: self.__splitters.
		:rtype: tuple or list
		"""

		return self.__splitters

	@splitters.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def splitters(self, value):
		"""
		Setter for **self.__splitters** attribute.

		:param value: Attribute value.
		:type value: tuple or list
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
				"splitters", value)
			for element in value:
				assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"splitters", element)
				assert len(element) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("splitters", element)
				assert not re.search(r"\w", element), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
					"splitters", element)
		self.__splitters = value

	@splitters.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def splitters(self):
		"""
		Deleter for **self.__splitters** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "splitters"))

	@property
	def namespaceSplitter(self):
		"""
		Property for **self.__namespaceSplitter** attribute.

		:return: self.__namespaceSplitter.
		:rtype: unicode
		"""

		return self.__namespaceSplitter

	@namespaceSplitter.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def namespaceSplitter(self, value):
		"""
		Setter for **self.__namespaceSplitter** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"namespaceSplitter", value)
			assert len(value) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("namespaceSplitter", value)
			assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
				"namespaceSplitter", value)
		self.__namespaceSplitter = value

	@namespaceSplitter.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def namespaceSplitter(self):
		"""
		Deleter for **self.__namespaceSplitter** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "namespaceSplitter"))

	@property
	def commentLimiters(self):
		"""
		Property for **self.__commentLimiters** attribute.

		:return: self.__commentLimiters.
		:rtype: tuple or list
		"""

		return self.__commentLimiters

	@commentLimiters.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def commentLimiters(self, value):
		"""
		Setter for **self.__commentLimiters** attribute.

		:param value: Attribute value.
		:type value: tuple or list
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
				"commentLimiters", value)
			for element in value:
				assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"commentLimiters", element)
		self.__commentLimiters = value

	@commentLimiters.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def commentLimiters(self):
		"""
		Deleter for **self.__commentLimiters** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "commentLimiters"))

	@property
	def commentMarker(self):
		"""
		Property for **self.__commentMarker** attribute.

		:return: self.__commentMarker.
		:rtype: unicode
		"""

		return self.__commentMarker

	@commentMarker.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def commentMarker(self, value):
		"""
		Setter for **self.__commentMarker** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"commentMarker", value)
			assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
				"commentMarker", value)
		self.__commentMarker = value

	@commentMarker.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def commentMarker(self):
		"""
		Deleter for **self.__commentMarker** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "commentMarker"))

	@property
	def quotationMarkers(self):
		"""
		Property for **self.__quotationMarkers** attribute.

		:return: self.__quotationMarkers.
		:rtype: tuple or list
		"""

		return self.__quotationMarkers

	@quotationMarkers.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def quotationMarkers(self, value):
		"""
		Setter for **self.__quotationMarkers** attribute.

		:param value: Attribute value.
		:type value: tuple or list
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
				"quotationMarkers", value)
			for element in value:
				assert type(element) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"quotationMarkers", element)
				assert len(element) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("quotationMarkers", element)
				assert not re.search(r"\w", element), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
					"quotationMarkers", element)
		self.__quotationMarkers = value

	@quotationMarkers.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def quotationMarkers(self):
		"""
		Deleter for **self.__quotationMarkers** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "quotationMarkers"))

	@property
	def rawSectionContentIdentifier(self):
		"""
		Property for **self.__rawSectionContentIdentifier** attribute.

		:return: self.__rawSectionContentIdentifier.
		:rtype: unicode
		"""

		return self.__rawSectionContentIdentifier

	@rawSectionContentIdentifier.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def rawSectionContentIdentifier(self, value):
		"""
		Setter for **self.__rawSectionContentIdentifier** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"rawSectionContentIdentifier", value)
		self.__rawSectionContentIdentifier = value

	@rawSectionContentIdentifier.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def rawSectionContentIdentifier(self):
		"""
		Deleter for **self.__rawSectionContentIdentifier** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "rawSectionContentIdentifier"))

	@property
	def defaultsSection(self):
		"""
		Property for **self.__defaultsSection** attribute.

		:return: self.__defaultsSection.
		:rtype: unicode
		"""

		return self.__defaultsSection

	@defaultsSection.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultsSection(self, value):
		"""
		Setter for **self.__defaultsSection** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"defaultsSection", value)
		self.__defaultsSection = value

	@defaultsSection.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultsSection(self):
		"""
		Deleter for **self.__defaultsSection** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultsSection"))

	@property
	def sections(self):
		"""
		Property for **self.__sections** attribute.

		:return: self.__sections.
		:rtype: OrderedDict or dict
		"""

		return self.__sections

	@sections.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def sections(self, value):
		"""
		Setter for **self.__sections** attribute.

		:param value: Attribute value.
		:type value: OrderedDict or dict
		"""

		if value is not None:
			assert type(value) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
			'OrderedDict' or 'dict'!".format("sections", value)
			for key, element in value.iteritems():
				assert type(key) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"sections", key)
				assert type(element) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
				'OrderedDict' or 'dict'!".format("sections", key)
		self.__sections = value

	@sections.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def sections(self):
		"""
		Deleter for **self.__sections** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "sections"))

	@property
	def comments(self):
		"""
		Property for **self.__comments** attribute.

		:return: self.__comments.
		:rtype: OrderedDict or dict
		"""

		return self.__comments

	@comments.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def comments(self, value):
		"""
		Setter for **self.__comments** attribute.

		:param value: Attribute value.
		:type value: OrderedDict or dict
		"""

		if value is not None:
			assert type(value) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
			'OrderedDict' or 'dict'!".format("comments", value)
			for key, element in value.iteritems():
				assert type(key) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
					"comments", key)
				assert type(element) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
				'OrderedDict' or 'dict'!".format("comments", key)
		self.__comments = value

	@comments.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def comments(self):
		"""
		Deleter for **self.__comments** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "comments"))

	@property
	def parsingErrors(self):
		"""
		Property for **self.__parsingErrors** attribute.

		:return: self.__parsingErrors.
		:rtype: list
		"""

		return self.__parsingErrors

	@parsingErrors.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def parsingErrors(self, value):
		"""
		Setter for **self.__parsingErrors** attribute.

		:param value: Attribute value.
		:type value: list
		"""

		if value is not None:
			assert type(value) is list, "'{0}' attribute: '{1}' type is not 'list'!".format("parsingErrors", value)
			for element in value:
				assert issubclass(element.__class__, foundations.exceptions.AbstractParsingError), \
					"'{0}' attribute: '{1}' is not a '{2}' subclass!".format(
						"parsingErrors", element, foundations.exceptions.AbstractParsingError.__class__.__name__)
		self.__parsingErrors = value

	@parsingErrors.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def parsingErrors(self):
		"""
		Deleter for **self.__parsingErrors** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "parsingErrors"))

	@property
	def preserveOrder(self):
		"""
		Property for **self.__preserveOrder** attribute.

		:return: self.__preserveOrder.
		:rtype: bool
		"""

		return self.__preserveOrder

	@preserveOrder.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def preserveOrder(self, value):
		"""
		Setter method for **self.__preserveOrder** attribute.

		:param value: Attribute value.
		:type value: bool
		"""

		if value is not None:
			assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("preserveOrder", value)
		self.__preserveOrder = value

	@preserveOrder.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def preserveOrder(self):
		"""
		Deleter method for **self.__preserveOrder** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "preserveOrder"))

	#******************************************************************************************************************
	#***	Class methods.
	#******************************************************************************************************************
	def __getitem__(self, section):
		"""
		Reimplements the :meth:`object.__getitem__` method.

		:param section: Section name.
		:type section: unicode
		:return: Section attributes.
		:rtype: OrderedDict or dict
		"""

		return self.__sections.__getitem__(section)

	def __setitem__(self, section, value):
		"""
		Reimplements the :meth:`object.__setitem__` method.

		:param section: Section name.
		:type section: unicode
		:param value: Value.
		:type value: dict
		"""

		return self.__sections.__setitem__(section, value)

	def __iter__(self):
		"""
		Reimplements the :meth:`object.__iter__` method.

		:return: Sections iterator.
		:rtype: object
		"""

		return self.__sections.iteritems()

	def __contains__(self, section):
		"""
		Reimplements the :meth:`object.__contains__` method.

		:param section: Section name.
		:type section: unicode
		:return: Section existence.
		:rtype: bool
		"""

		return self.sectionExists(section)

	def __len__(self):
		"""
		Reimplements the :meth:`object.__len__` method.

		:return: Sections count.
		:rtype: int
		"""

		return len(self.__sections)

	@foundations.exceptions.handleExceptions(foundations.exceptions.FileStructureParsingError)
	def parse(self,
			  rawSections=None,
			  namespaces=True,
			  stripComments=True,
			  stripWhitespaces=True,
			  stripQuotationMarkers=True,
			  raiseParsingErrors=True):
		"""
		Processes the file content and extracts the sections / attributes
			as nested :class:`collections.OrderedDict` dictionaries or regular dictionaries.

		Usage::

			>>> content = ["; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse(stripComments=False)
			<foundations.parsers.SectionsFileParser object at 0x860323123>
			>>> sectionsFileParser.sections.keys()
			[u'_defaults']
			>>> sectionsFileParser.sections["_defaults"].values()
			[u'Value A', u'Value B']
			>>> sectionsFileParser.parse(stripComments=False, stripQuotationMarkers=False)
			<foundations.parsers.SectionsFileParser object at 0x860323123>
			>>> sectionsFileParser.sections["_defaults"].values()
			[u'"Value A"', u'"Value B"']
			>>> sectionsFileParser.comments
			OrderedDict([(u'_defaults|#0', {u'content': u'Comment.', u'id': 0})])
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x860323123>
			>>> sectionsFileParser.sections["_defaults"]
			OrderedDict([(u'_defaults|Attribute 1', u'Value A'), (u'_defaults|Attribute 2', u'Value B')])
			>>> sectionsFileParser.parse(namespaces=False)
			<foundations.parsers.SectionsFileParser object at 0x860323123>
			>>> sectionsFileParser.sections["_defaults"]
			OrderedDict([(u'Attribute 1', u'Value A'), (u'Attribute 2', u'Value B')])

		:param rawSections: Ignored raw sections.
		:type rawSections: tuple or list
		:param namespaces: Attributes and comments are namespaced.
		:type namespaces: bool
		:param stripComments: Comments are stripped.
		:type stripComments: bool
		:param stripWhitespaces: Whitespaces are stripped.
		:type stripWhitespaces: bool
		:param stripQuotationMarkers: Attributes values quotation markers are stripped.
		:type stripQuotationMarkers: bool
		:param raiseParsingErrors: Raise parsing errors.
		:type raiseParsingErrors: bool
		:return: SectionsFileParser instance.
		:rtype: SectionsFileParser
		"""

		LOGGER.debug("> Reading sections from: '{0}'.".format(self.path))

		if not self.content:
			self.read()

		attributes = {} if not self.__preserveOrder else OrderedDict()
		section = self.__defaultsSection
		rawSections = rawSections or []

		commentId = 0
		for i, line in enumerate(self.content):
			# Comments matching.
			search = re.search(r"^\s*[{0}](?P<comment>.+)$".format("".join(self.__commentLimiters)), line)
			if search:
				if not stripComments:
					comment = namespaces and foundations.namespace.setNamespace(section, "{0}{1}".format(
						self.__commentMarker, commentId), self.__namespaceSplitter) or \
							  "{0}{1}".format(self.__commentMarker, commentId)
					self.__comments[comment] = {"id": commentId, "content": stripWhitespaces and \
																			search.group(
																				"comment").strip() or search.group(
						"comment")}
					commentId += 1
				continue

			# Sections matching.
			search = re.search(r"^\s*\[(?P<section>.+)\]\s*$", line)
			if search:
				section = stripWhitespaces and search.group("section").strip() or search.group("section")
				if not self.__preserveOrder:
					attributes = {}
				else:
					attributes = OrderedDict()
				rawContent = []
				continue

			if section in rawSections:
				rawContent.append(line)
				attributes[self.__rawSectionContentIdentifier] = rawContent
			else:
				# Empty line matching.
				search = re.search(r"^\s*$", line)
				if search:
					continue

				# Attributes matching.
				search = re.search(r"^(?P<attribute>.+?)[{0}](?P<value>.+)$".format("".join(self.__splitters)), line) \
					or re.search(r"^(?P<attribute>.+?)[{0}]\s*$".format("".join(self.__splitters)), line)
				if search:
					attribute = search.group("attribute").strip() if stripWhitespaces else search.group("attribute")
					attribute = foundations.namespace.setNamespace(section, attribute, self.__namespaceSplitter) \
						if namespaces else attribute

					if len(search.groups()) == 2:
						value = search.group("value").strip() if stripWhitespaces else search.group("value")
						attributes[attribute] = value.strip("".join(self.__quotationMarkers)) \
							if stripQuotationMarkers else value
					else:
						attributes[attribute] = None
				else:
					self.__parsingErrors.append(foundations.exceptions.AttributeStructureParsingError(
						"Attribute structure is invalid: {0}".format(line), i + 1))

			self.__sections[section] = attributes

		LOGGER.debug("> Sections: '{0}'.".format(self.__sections))
		LOGGER.debug("> '{0}' file parsing done!".format(self.path))

		if self.__parsingErrors and raiseParsingErrors:
			raise foundations.exceptions.FileStructureParsingError(
				"{0} | '{1}' structure is invalid, parsing exceptions occured!".format(self.__class__.__name__,
																					   self.path))

		return self

	def sectionExists(self, section):
		"""
		Checks if given section exists.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x845683844>
			>>> sectionsFileParser.sectionExists("Section A")
			True
			>>> sectionsFileParser.sectionExists("Section C")
			False

		:param section: Section to check for existence.
		:type section: unicode
		:return: Section existence.
		:rtype: bool
		"""

		if section in self.__sections:
			LOGGER.debug("> '{0}' section exists in '{1}'.".format(section, self))
			return True
		else:
			LOGGER.debug("> '{0}' section doesn't exists in '{1}'.".format(section, self))
			return False

	def attributeExists(self, attribute, section):
		"""
		Checks if given attribute exists.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x234564563>
			>>> sectionsFileParser.attributeExists("Attribute 1", "Section A")
			True
			>>> sectionsFileParser.attributeExists("Attribute 2", "Section A")
			False

		:param attribute: Attribute to check for existence.
		:type attribute: unicode
		:param section: Section to search the attribute in.
		:type section: unicode
		:return: Attribute existence.
		:rtype: bool
		"""

		if foundations.namespace.removeNamespace(attribute, rootOnly=True) in self.getAttributes(section,
																								 stripNamespaces=True):
			LOGGER.debug("> '{0}' attribute exists in '{1}' section.".format(attribute, section))
			return True
		else:
			LOGGER.debug("> '{0}' attribute doesn't exists in '{1}' section.".format(attribute, section))
			return False

	def getAttributes(self, section, stripNamespaces=False):
		"""
		Returns given section attributes.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x125698322>
			>>> sectionsFileParser.getAttributes("Section A")
			OrderedDict([(u'Section A|Attribute 1', u'Value A')])
			>>> sectionsFileParser.preserveOrder=False
			>>> sectionsFileParser.getAttributes("Section A")
			{u'Section A|Attribute 1': u'Value A'}
			>>> sectionsFileParser.preserveOrder=True
			>>> sectionsFileParser.getAttributes("Section A", stripNamespaces=True)
			OrderedDict([(u'Attribute 1', u'Value A')])

		:param section: Section containing the requested attributes.
		:type section: unicode
		:param stripNamespaces: Strip namespaces while retrieving attributes.
		:type stripNamespaces: bool
		:return: Attributes.
		:rtype: OrderedDict or dict
		"""

		LOGGER.debug("> Getting section '{0}' attributes.".format(section))

		attributes = OrderedDict() if self.__preserveOrder else dict()
		if not self.sectionExists(section):
			return attributes

		if stripNamespaces:
			for attribute, value in self.__sections[section].iteritems():
				attributes[foundations.namespace.removeNamespace(attribute, rootOnly=True)] = value
		else:
			attributes.update(self.__sections[section])
		LOGGER.debug("> Attributes: '{0}'.".format(attributes))
		return attributes

	def getAllAttributes(self):
		"""
		Returns all sections attributes.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x845683844>
			>>> sectionsFileParser.getAllAttributes()
			OrderedDict([(u'Section A|Attribute 1', u'Value A'), (u'Section B|Attribute 2', u'Value B')])
			>>> sectionsFileParser.preserveOrder=False
			>>> sectionsFileParser.getAllAttributes()
			{u'Section B|Attribute 2': u'Value B', u'Section A|Attribute 1': u'Value A'}

		:return: All sections / files attributes.
		:rtype: OrderedDict or dict
		"""

		allAttributes = OrderedDict() if self.__preserveOrder else dict()

		for attributes in self.__sections.itervalues():
			for attribute, value in attributes.iteritems():
				allAttributes[attribute] = value
		return allAttributes

	@foundations.exceptions.handleExceptions(foundations.exceptions.FileStructureParsingError)
	def getValue(self, attribute, section, default=""):
		"""
		Returns requested attribute value.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x679302423>
			>>> sectionsFileParser.getValue("Attribute 1", "Section A")
			u'Value A'

		:param attribute: Attribute name.
		:type attribute: unicode
		:param section: Section containing the searched attribute.
		:type section: unicode
		:param default: Default return value.
		:type default: object
		:return: Attribute value.
		:rtype: unicode
		"""

		if not self.attributeExists(attribute, section):
			return default

		if attribute in self.__sections[section]:
			value = self.__sections[section][attribute]
		elif foundations.namespace.setNamespace(section, attribute) in self.__sections[section]:
			value = self.__sections[section][foundations.namespace.setNamespace(section, attribute)]
		LOGGER.debug("> Attribute: '{0}', value: '{1}'.".format(attribute, value))
		return value

	def setValue(self, attribute, section, value):
		"""
		Sets requested attribute value.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			<foundations.parsers.SectionsFileParser object at 0x109304209>
			>>> sectionsFileParser.setValue("Attribute 3", "Section C", "Value C")
			True

		:param attribute: Attribute name.
		:type attribute: unicode
		:param section: Section containing the searched attribute.
		:type section: unicode
		:param value: Attribute value.
		:type value: object
		:return: Definition success.
		:rtype: bool
		"""

		if not self.sectionExists(section):
			LOGGER.debug("> Adding '{0}' section.".format(section))
			self.__sections[section] = OrderedDict() if self.__preserveOrder else dict()

		self.__sections[section][attribute] = value

		return True

	def write(self,
			  namespaces=False,
			  splitter="=",
			  commentLimiter=(";"),
			  spacesAroundSplitter=True,
			  spaceAfterCommentLimiter=True):
		"""
		Writes defined file using :obj:`SectionsFileParser.sections` and
			:obj:`SectionsFileParser.comments` class properties content.

		Usage::

			>>> sections = {"Section A": {"Section A|Attribute 1": "Value A"}, \
"Section B": {"Section B|Attribute 2": "Value B"}}
			>>> sectionsFileParser = SectionsFileParser("SectionsFile.rc")
			>>> sectionsFileParser.sections = sections
			>>> sectionsFileParser.write()
			True
			>>> sectionsFileParser.read()
			u'[Section A]\\nAttribute 1 = Value A\\n\\n[Section B]\\nAttribute 2 = Value B\\n'

		:param namespaces: Attributes are namespaced.
		:type namespaces: bool
		:param splitter: Splitter character.
		:type splitter: unicode
		:param commentLimiter: Comment limiter character.
		:type commentLimiter: unicode
		:param spacesAroundSplitter: Spaces around attributes and value splitters.
		:type spacesAroundSplitter: bool
		:param spaceAfterCommentLimiter: Space after comments limiter.
		:type spaceAfterCommentLimiter: bool
		:return: Method success.
		:rtype: bool
		"""

		self.uncache()

		LOGGER.debug("> Setting '{0}' file content.".format(self.path))
		attributeTemplate = "{{0}} {0} {{1}}\n".format(splitter) if spacesAroundSplitter else \
							"{{0}}{0}{{1}}\n".format(splitter)
		attributeTemplate = foundations.strings.replace(attributeTemplate, {"{{" : "{", "}}" : "}"})
		commentTemplate = spaceAfterCommentLimiter and "{0} {{0}}\n".format(commentLimiter) or \
						  "{0}{{0}}\n".format(commentLimiter)
		if self.__defaultsSection in self.__sections:
			LOGGER.debug("> Appending '{0}' default section.".format(self.__defaultsSection))
			if self.__comments:
				for comment, value in self.__comments.iteritems():
					if self.__defaultsSection in comment:
						value = value["content"] or ""
						LOGGER.debug("> Appending '{0}' comment with '{1}' value.".format(comment, value))
						self.content.append(commentTemplate.format(value))
			for attribute, value in self.__sections[self.__defaultsSection].iteritems():
				attribute = namespaces and attribute or foundations.namespace.removeNamespace(attribute,
																							  self.__namespaceSplitter,
																							  rootOnly=True)
				value = value or ""
				LOGGER.debug("> Appending '{0}' attribute with '{1}' value.".format(attribute, value))
				self.content.append(attributeTemplate.format(attribute, value))
			self.content.append("\n")

		for i, section in enumerate(self.__sections):
			LOGGER.debug("> Appending '{0}' section.".format(section))
			self.content.append("[{0}]\n".format(section))
			if self.__comments:
				for comment, value in self.__comments.iteritems():
					if section in comment:
						value = value["content"] or ""
						LOGGER.debug("> Appending '{0}' comment with '{1}' value.".format(comment, value))
						self.content.append(commentTemplate.format(value))
			for attribute, value in self.__sections[section].iteritems():
				if foundations.namespace.removeNamespace(attribute) == self.__rawSectionContentIdentifier:
					LOGGER.debug("> Appending '{0}' raw section content.".format(section))
					for line in value:
						self.content.append(line)
				else:
					LOGGER.debug("> Appending '{0}' section.".format(section))
					attribute = namespaces and attribute or foundations.namespace.removeNamespace(attribute,
																								  self.__namespaceSplitter,
																								  rootOnly=True)
					value = value or ""
					LOGGER.debug("> Appending '{0}' attribute with '{1}' value.".format(attribute, value))
					self.content.append(attributeTemplate.format(attribute, value))
			if i != len(self.__sections) - 1:
				self.content.append("\n")
		foundations.io.File.write(self)
		return True
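
# A minimal sketch (not from the original source): a round trip through the
# SectionsFileParser API above, driven the same way as the docstring usages,
# i.e. by feeding content directly instead of reading a file from disk.
if __name__ == "__main__":
	sectionsFileParser = SectionsFileParser()
	sectionsFileParser.content = ["[Section A]\n", "; Comment.\n", "Attribute 1 = \"Value A\"\n"]
	sectionsFileParser.parse()
	print sectionsFileParser.getValue("Attribute 1", "Section A")	# prints: Value A
	sectionsFileParser.setValue("Attribute 2", "Section A", "Value B")
	print sectionsFileParser.getAttributes("Section A").keys()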
Exemple #14
0
class GitCollection(object):
    """
    Hold a collection of instances of GIT repositories
    """
    
    def __init__(self, prefix=None):
        """
        prefix will help filter repos that you want to serve
        """
        self.prefix = prefix
        self.repos_= {}
        self.lazy_loaded_ = False 
        
    def reset(self):
        self.repos_= {}
        self.lazy_loaded_ = False 
        
    def watch(self, repo_path, is_bare = True):
        pass
        
    def lazy_load_(self, name):
        if not self.lazy_loaded_ and self.repos_:
            #print('%s %s %s'%(name,self.lazy_loaded_, len(self.repos_)))
            return
        
        if name in self.repos_:
            return
        
        base_path = os.path.join(GIT_ROOT, name)
        bare_path = os.path.join(GIT_ROOT, '.'.join([name,'git']))
        for path in [base_path, bare_path]:
            try:
                repo = None
                try:
                    repo = GitRepository(path)
                except Exception as e:
                    #print('Unable to instantiate a GitRepo for: %s'%path)
                    repo = None
                if repo != None:
                    self.repos_[name] = repo
                    self.watch(path, repo.is_bare)
                    break
                    
            except Exception as e:
                print 'ERROR (lazy_load_): %s'%e
        self.lazy_loaded_ = True 
        
            
    def load_all_(self):
        if self.lazy_loaded_:
            self.repos_={}
            self.lazy_loaded_ = False 
        else:
            if self.repos_:
                return
                
        entries = os.listdir(GIT_ROOT)
        for d in entries:
            if self.prefix != None:
                pd = d.split('.')[0]
                if pd != self.prefix:
                    continue
                
            name = os.path.join(GIT_ROOT,d)
            if os.path.isdir(name):
                try:
                    repo = None
                    try:
                        repo = GitRepository(name)
                        head = repo.head.get_object().commit_time
                    except Exception as e:
                        # print('Can not create a repo off [%s]'%name)
                        # print('\t %s'%e)
                        repo = None
                    if repo != None:
                        if d.endswith('git'):
                            slug = d[:-4]
                        else:
                            slug = d
                        self.repos_[slug] = repo
                        self.watch(name, repo.is_bare)
                        
                except Exception as e:
                    print 'ERROR (root): %s'%e
            
        # If we sort on __init__ we have to sort less often
        self.repos_ = OrderedDict(sorted(self.repos_.iteritems(), key=lambda r: r[1].head.get_object().commit_time, reverse=True))
        
    def get_names(self):
        self.load_all_()
        return self.repos_.keys()
        
    def get_all(self):
        self.load_all_()
        return self.repos_
    
    def __getattr__(self, name):
        self.lazy_load_(name)
        if name in self.repos_:
            return self.repos_[name]
        else:
            raise AttributeError("GitCollection has no repository %s \n%s" % (name,self.repos_))
            
        
    # 3.4.6. Emulating container types <http://docs.python.org/reference/datamodel.html#emulating-container-types>
    # mainly by exposing "read-only" self.repos_'s methods 
    
    def __len__(self):
        self.load_all_()
        return len(self.repos_)
        
    def __getitem__(self, key):
        self.lazy_load_(key)
        return self.repos_[key]
        
    def __iter__(self):
        self.load_all_()
        return self.repos_.__iter__()
        
    def __contains__(self, item):
        self.lazy_load_(item)
        return self.repos_.__contains__(item)
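
# A minimal sketch (not from the original source) of how the collection above
# is meant to be used. It assumes GIT_ROOT points at a directory of working or
# bare repositories that GitRepository can open; the 'myproject' slug below is
# purely hypothetical.
if __name__ == '__main__':
    repos = GitCollection()

    # get_names() triggers load_all_(), which sorts by last commit time.
    for slug in repos.get_names():
        print slug

    # Individual repositories are lazy-loaded on item or attribute access.
    if 'myproject' in repos:
        print repos['myproject'].head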
        
Exemple #15
0
def withdraws_monthly_payment(request, currency_acronym):
    user = request.user

    if not user.is_authenticated() or not user.is_superuser:
        raise http.Http404()

    try:
        currency = Currency.objects.get(acronym=currency_acronym)
    except Currency.DoesNotExist:
        raise http.Http404()

    receiverList = OrderedDict()
    today = datetime.date.today()

    tutors = Tutor.objects.select_related().filter(
                profile__currency = currency, 
                profile__income__gt = 0, 
                profile__paypal_email__isnull=False,
            ).exclude(profile__paypal_email = '')
    
    total = 0
    for tutor in tutors:
        map(lambda w: w.cancel(), tutor.withdraws.filter(status=WithdrawItem.STATUS_TYPES.PENDING))
        
        profile = tutor.profile
        credits = profile.income
        amount = round(credits * currency.credit_value(), 2)
        email = profile.paypal_email
        withdraw = WithdrawItem(
            user = tutor,
            value = amount,
            credits = credits, 
            email = email,
            currency = currency,
            monthly_payment = True,
        )
        withdraw.save()

        receiverList[tutor.id] = {
            'amount': '%.2f' % amount,
            'email': email,
            'name': u'%s' % tutor.get_full_name(),
            'invoiceId': withdraw.invoice,
            'customId': 'UTWD-%s' % withdraw.id,
        }
        
        total += amount

    
    # Built from key/value pairs so the key order is actually preserved.
    payment = OrderedDict([
        ('currencyCode', currency.acronym),
        ('ipnNotificationUrl', 'http://%s%s' % (settings.PROJECT_SITE_DOMAIN, reverse('paypal-ap-ipn'))),
        ('cancelUrl', "http://%s%s" % (settings.PROJECT_SITE_DOMAIN, reverse('withdraws_monthly'))),
        ('returnUrl', "http://%s%s" % (settings.PROJECT_SITE_DOMAIN, reverse('withdraws_monthly'))),
        ('startingDate', today.strftime('%Y-%m-%d')),
        ('endingDate', (today + datetime.timedelta(days=1)).strftime('%Y-%m-%d')),
        ('options', OrderedDict([('displayOptions.businessName', settings.PROJECT_NAME)])),
    ])

    for i, (id, receiver) in enumerate(receiverList.iteritems()):
        payment['receiverList.receiver(%s).amount' % i] = receiver['amount']
        payment['receiverList.receiver(%s).email' % i] = receiver['email']
        payment['receiverList.receiver(%s).name' % i] = receiver['name']
        payment['receiverList.receiver(%s).invoiceId' % i] = receiver['invoiceId']
        payment['options']['receiverOptions(%s).receiver.email' % (i)] = receiver['email']
        payment['options']['receiverOptions(%s).description' % (i)] = receiver['name']
        payment['options']['receiverOptions(%s).customId' % (i)] = receiver['invoiceId']    
    
    return {
        'payment': payment,
        'currency': currency,
        'total': total,
    }
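
# A minimal sketch (not from the original source): the loop above flattens each
# receiver into indexed "receiverList.receiver(i).*" keys on the payment
# dictionary; with two hypothetical receivers the result looks like this.
if __name__ == '__main__':
    receivers = OrderedDict([
        (1, {'amount': '10.00', 'email': 'a@example.com', 'name': 'Tutor A'}),
        (2, {'amount': '25.50', 'email': 'b@example.com', 'name': 'Tutor B'}),
    ])
    flat = OrderedDict()
    for i, (_id, receiver) in enumerate(receivers.iteritems()):
        flat['receiverList.receiver(%s).amount' % i] = receiver['amount']
        flat['receiverList.receiver(%s).email' % i] = receiver['email']
        flat['receiverList.receiver(%s).name' % i] = receiver['name']
    for key, value in flat.iteritems():
        print '%s = %s' % (key, value)
    # receiverList.receiver(0).amount = 10.00
    # receiverList.receiver(0).email = a@example.com
    # ...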
Exemple #16
0
class Job(object):
    """Jobs!

    """
    email = settings.email
    templates = settings.template_paths
    use_last_used = False
    last_used = {'queue': None, 'save_script': False, 'submit_job': False}

    def __init__(self, filename):
        self.input_file = FileInfo(filename)
        self.job_fullname = self.input_file.fullname
        self.job_directory = self.input_file.directory

        self.queue = None
        self.processors = None
        self.script_name = None
        self.template_file = None
       
        self.summary = OrderedDict() 
        self.summary['Job Name'] = self.job_fullname

    def preprocess(self):
        """Run after file exists verification and queue setting but before any 
        other job processing, including creating the script file.
        """
        pass
    
    def customize_template(self):
        """Run immediately after the script file has been created.  Useful for
        adding custom fields to the template.
        """
        pass

    def postprocess(self):
        """Run immediately after the script file has been created, but before
        the calculation summary and job submission.
        """
        pass

    def process(self):
        """Main processing routine for jobs.

        """
        message('Processing job: %s' % self.job_fullname, lines_before=1)
        self._request_queue()
        self.preprocess()
        self._verify_processors()
        self._prepare_template()
        self.postprocess()
        self._calculation_summary()
        self._submit()

    @classmethod
    def cleanup(cls):
        """Remove all temporary submission script files for all jobs."""
        Template.cleanup()

    def _request_queue(self):
        """Set the queue attribute ."""
        if not Job.use_last_used: 
            queue_options = []
            for queue in settings.queues.itervalues():
                queue_options.append(queue.option) 

            queue_name = prompt('On which queue should this job be run?',
                                queue_options, settings.default_queue)
            queue = queue_name + '.q' 
        else:
            queue = Job.last_used['queue']
        Job.last_used['queue'] = queue
        self.summary['Queue'] = queue
        self.queue = queue

    def _verify_processors(self):
        """Verify number processors doesn't exceed max for selected queue."""
        try:
            processors = int(self.processors)
            queue = settings.queues[self.queue]
            max_cores = queue.cores
            if processors > max_cores:
                error('Max number of cores exceeded: job specifies %s cores '
                      'and %s only has %s cores' % 
                      (processors, queue, max_cores), die=True)
            elif processors == 1 and not settings.serial:
                error('Use submit -s (serial mode) for single processor jobs',
                      die=True)
            elif processors > 1 and settings.serial:
                error('Serial mode is for single-processor jobs only: job '
                      'specifies %s cores' % processors, die=True)
            else:
                return True
        except AttributeError:
            error('Cannot verify processors until processor and queue '
                  'attributes have been set', die=True)

    def _prepare_template(self):
        save_script = Job.last_used['save_script']
        if settings.request_save:
            if not Job.use_last_used: 
                save_script = prompt('Would you like to save the script file?',
                                     ['y', 'n'], 'n', return_boolean=True)
                Job.last_used['save_script'] = save_script 

            if save_script: 
                self.script_name = prompt("Specify the script name or press "
                                          "enter to use the job's filename:")

        self.template = Template(self.job_fullname, self.job_directory,
                                 self.queue, self.processors,
                                 self.template_file, Job.templates,
                                 email=Job.email, script_name=self.script_name)

        # Allow inherited classes to insert custom meta-data into the script
        self.customize_template()

        # Validation of *script_name* is handled by the ``Template`` class,
        # if an invalid name was provided, update the ``Job`` *script_name*
        # with the valid version obtained by ``Template``.
        self.script_name = self.template.script_name
        if save_script:
            self.summary['Submission Script'] = self.script_name
            self.template.save(self.script_name)
        else:
            self.template.write()

    def _calculation_summary(self):
        """Print the calculation summary showing the job's metadata.
        
        Any key-value pairs stored in the object's ``summary`` attribute will
        be displayed before job submission.

        """
        data_display = []
        for key,value in self.summary.iteritems():
            display = '%18s: %-20s' % (key, value)
            data_display.append(display)
        
        separator = '-'*40
        header = '%29s' % 'CALCULATION SUMMARY'

        output = [separator, header, separator, data_display, separator]
        for item in output:
            if isinstance(item, list):
                for element in item:
                    message(element)
            else:
                message(item)

    def _submit(self):
        """Submit the job to the queue for processing.

        This option can be saved to the ``Job`` class ``last_used`` attribute
        for automation of script submission.
        
        """
        submit_job = Job.last_used['submit_job']
        if not Job.use_last_used:
            submit_job = prompt('Submit the job?', ['y', 'n'], 'y',
                                return_boolean=True)
            Job.last_used['submit_job'] = submit_job
        if submit_job:
            self.template.submit()
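
# A minimal sketch (not from the original source): Job is a template-method
# style base class; a concrete job sets "processors"/"template_file" and
# overrides the hooks called from process(). The subclass name, template file
# and input filename below are hypothetical.
class ExampleJob(Job):
    """Hypothetical job type illustrating the hook points."""

    def preprocess(self):
        # Runs after the queue prompt, before processor/template handling.
        self.processors = 4
        self.template_file = 'example.sge'
        self.summary['Processors'] = self.processors

    def customize_template(self):
        # Runs right after self.template has been created.
        pass

if __name__ == '__main__':
    job = ExampleJob('input_file.inp')
    job.process()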
Exemple #17
0
def sliceReStructuredText(input, output):
    """
	Slices given reStructuredText file.

	:param input: ReStructuredText file to slice.
	:type input: unicode
	:param output: Directory to output sliced reStructuredText files.
	:type output: unicode
	:return: Definition success.
	:rtype: bool
	"""

    LOGGER.info("{0} | Slicing '{1}' file!".format(sliceReStructuredText.__name__, input))
    file = File(input)
    file.cache()

    slices = OrderedDict()
    for i, line in enumerate(file.content):
        search = re.search(r"^\.\. \.(\w+)", line)
        if search:
            slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

    index = 0
    for slice, sliceStart in slices.iteritems():
        sliceFile = File(os.path.join(output, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
        LOGGER.info("{0} | Outputing '{1}' file!".format(sliceReStructuredText.__name__, sliceFile.path))
        sliceEnd = (
            index < (len(slices.values()) - 1)
            and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT
            or len(file.content)
        )

        for i in range(sliceStart, sliceEnd):
            skipLine = False
            for item in CONTENT_DELETION:
                if re.search(item, file.content[i]):
                    LOGGER.info(
                        "{0} | Skipping Line '{1}' with '{2}' content!".format(sliceReStructuredText.__name__, i, item)
                    )
                    skipLine = True
                    break

            if skipLine:
                continue

            line = file.content[i]
            for pattern, value in STATEMENT_SUBSTITUTE.iteritems():
                line = re.sub(pattern, value, line)

            search = re.search(r"-  `[\w ]+`_ \(([\w\.]+)\)", line)
            if search:
                LOGGER.info(
                    "{0} | Updating Line '{1}' link: '{2}'!".format(
                        sliceReStructuredText.__name__, i, search.groups()[0]
                    )
                )
                line = "-  :ref:`{0}`\n".format(search.groups()[0])
            sliceFile.content.append(line)

        sliceFile.write()
        index += 1

    return True
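
# A minimal sketch (not from the original source): the slicer keys on
# reStructuredText comment markers of the form ".. .Name" and writes each
# marked block to its own file in the output directory. The demo file name
# and contents below are hypothetical.
if __name__ == '__main__':
    demo = '\n'.join(['.. .Introduction', '', 'Some introductory text.', '',
                      '.. .Usage', '', 'Some usage text.', ''])
    with open('demo.rst', 'w') as demoFile:
        demoFile.write(demo)
    sliceReStructuredText('demo.rst', '.')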
Exemple #18
0
class SearchInFiles(foundations.ui.common.QWidgetFactory(uiFile=UI_FILE)):
	"""
	Defines search and replace in files dialog used by the **ScriptEditor** Component.
	"""

	def __init__(self, parent, *args, **kwargs):
		"""
		Initializes the class.

		:param parent: Object parent.
		:type parent: QObject
		:param \*args: Arguments.
		:type \*args: \*
		:param \*\*kwargs: Keywords arguments.
		:type \*\*kwargs: \*\*
		"""

		LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

		super(SearchInFiles, self).__init__(parent, *args, **kwargs)

		# --- Setting class attributes. ---
		self.__container = self.__scriptEditor = parent

		self.__filesCache = foundations.cache.Cache()

		self.__searchPatternsModel = None
		self.__replaceWithPatternsModel = None

		self.__model = None
		self.__view = None
		self.__delegate = None

		self.__locations = OrderedDict([("Add Directory ...", "directory"),
								("Add File ...", "file"),
								("Add Opened Files", "editors"),
								("Add Include Filter", "includeFilter"),
								("Add Exclude Filter", "excludeFilter")])
		self.__locationsMenu = None

		self.__defaultFilterIn = "*.txt"
		self.__filtersInFormat = "{0}"
		self.__defaultFilterOut = "*.txt"
		self.__filtersOutFormat = "!{0}"
		self.__defaultTarget = "Opened Files"
		self.__targetsFormat = "<{0}>"

		self.__defaultLineNumberWidth = 6
		self.__defaultLineColor = QColor(144, 144, 144)

		self.__ignoreHiddenFiles = True

		self.__searchWorkerThread = None

		SearchInFiles.__initializeUi(self)

	#******************************************************************************************************************
	#***	Attributes properties.
	#******************************************************************************************************************
	@property
	def container(self):
		"""
		Property for **self.__container** attribute.

		:return: self.__container.
		:rtype: QObject
		"""

		return self.__container

	@container.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def container(self, value):
		"""
		Setter for **self.__container** attribute.

		:param value: Attribute value.
		:type value: QObject
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "container"))

	@container.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def container(self):
		"""
		Deleter for **self.__container** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "container"))

	@property
	def scriptEditor(self):
		"""
		Property for **self.__scriptEditor** attribute.

		:return: self.__scriptEditor.
		:rtype: QWidget
		"""

		return self.__scriptEditor

	@scriptEditor.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def scriptEditor(self, value):
		"""
		Setter for **self.__scriptEditor** attribute.

		:param value: Attribute value.
		:type value: QWidget
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "scriptEditor"))

	@scriptEditor.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def scriptEditor(self):
		"""
		Deleter for **self.__scriptEditor** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "scriptEditor"))

	@property
	def filesCache(self):
		"""
		Property for **self.__filesCache** attribute.

		:return: self.__filesCache.
		:rtype: Cache
		"""

		return self.__filesCache

	@filesCache.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def filesCache(self, value):
		"""
		Setter for **self.__filesCache** attribute.

		:param value: Attribute value.
		:type value: Cache
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "filesCache"))

	@filesCache.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def filesCache(self):
		"""
		Deleter for **self.__filesCache** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "filesCache"))

	@property
	def searchPatternsModel(self):
		"""
		Property for **self.__searchPatternsModel** attribute.

		:return: self.__searchPatternsModel.
		:rtype: PatternsModel
		"""

		return self.__searchPatternsModel

	@searchPatternsModel.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def searchPatternsModel(self, value):
		"""
		Setter for **self.__searchPatternsModel** attribute.

		:param value: Attribute value.
		:type value: PatternsModel
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "searchPatternsModel"))

	@searchPatternsModel.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def searchPatternsModel(self):
		"""
		Deleter for **self.__searchPatternsModel** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "searchPatternsModel"))

	@property
	def replaceWithPatternsModel(self):
		"""
		Property for **self.__replaceWithPatternsModel** attribute.

		:return: self.__replaceWithPatternsModel.
		:rtype: PatternsModel
		"""

		return self.__replaceWithPatternsModel

	@replaceWithPatternsModel.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def replaceWithPatternsModel(self, value):
		"""
		Setter for **self.__replaceWithPatternsModel** attribute.

		:param value: Attribute value.
		:type value: PatternsModel
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "replaceWithPatternsModel"))

	@replaceWithPatternsModel.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def replaceWithPatternsModel(self):
		"""
		Deleter for **self.__replaceWithPatternsModel** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "replaceWithPatternsModel"))

	@property
	def model(self):
		"""
		Property for **self.__model** attribute.

		:return: self.__model.
		:rtype: SearchResultsModel
		"""

		return self.__model

	@model.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def model(self, value):
		"""
		Setter for **self.__model** attribute.

		:param value: Attribute value.
		:type value: SearchResultsModel
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "model"))

	@model.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def model(self):
		"""
		Deleter for **self.__model** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "model"))

	@property
	def view(self):
		"""
		Property for **self.__view** attribute.

		:return: self.__view.
		:rtype: QWidget
		"""

		return self.__view

	@view.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def view(self, value):
		"""
		Setter for **self.__view** attribute.

		:param value: Attribute value.
		:type value: QWidget
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "view"))

	@view.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def view(self):
		"""
		Deleter for **self.__view** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "view"))

	@property
	def delegate(self):
		"""
		Property for **self.__delegate** attribute.

		:return: self.__delegate.
		:rtype: QItemDelegate
		"""

		return self.__delegate

	@delegate.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def delegate(self, value):
		"""
		Setter for **self.__delegate** attribute.

		:param value: Attribute value.
		:type value: QItemDelegate
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "delegate"))

	@delegate.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def delegate(self):
		"""
		Deleter for **self.__delegate** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "delegate"))

	@property
	def locations(self):
		"""
		Property for **self.__locations** attribute.

		:return: self.__locations.
		:rtype: OrderedDict
		"""

		return self.__locations

	@locations.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def locations(self, value):
		"""
		Setter for **self.__locations** attribute.

		:param value: Attribute value.
		:type value: OrderedDict
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "locations"))

	@locations.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def locations(self):
		"""
		Deleter for **self.__locations** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "locations"))

	@property
	def locationsMenu(self):
		"""
		Property for **self.__locationsMenu** attribute.

		:return: self.__locationsMenu.
		:rtype: QMenu
		"""

		return self.__locationsMenu

	@locationsMenu.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def locationsMenu(self, value):
		"""
		Setter for **self.__locationsMenu** attribute.

		:param value: Attribute value.
		:type value: QMenu
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "locationsMenu"))

	@locationsMenu.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def locationsMenu(self):
		"""
		Deleter for **self.__locationsMenu** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "locationsMenu"))

	@property
	def defaultFilterIn(self):
		"""
		Property for **self.__defaultFilterIn** attribute.

		:return: self.__defaultFilterIn.
		:rtype: unicode
		"""

		return self.__defaultFilterIn

	@defaultFilterIn.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultFilterIn(self, value):
		"""
		Setter for **self.__defaultFilterIn** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
			"defaultFilterIn", value)
		self.__defaultFilterIn = value

	@defaultFilterIn.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultFilterIn(self):
		"""
		Deleter for **self.__defaultFilterIn** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultFilterIn"))

	@property
	def filtersInFormat(self):
		"""
		Property for **self.__filtersInFormat** attribute.

		:return: self.__filtersInFormat.
		:rtype: unicode
		"""

		return self.__filtersInFormat

	@filtersInFormat.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def filtersInFormat(self, value):
		"""
		Setter for **self.__filtersInFormat** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
			"filtersInFormat", value)
		self.__filtersInFormat = value

	@filtersInFormat.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def filtersInFormat(self):
		"""
		Deleter for **self.__filtersInFormat** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "filtersInFormat"))

	@property
	def defaultFilterOut(self):
		"""
		Property for **self.__defaultFilterOut** attribute.

		:return: self.__defaultFilterOut.
		:rtype: unicode
		"""

		return self.__defaultFilterOut

	@defaultFilterOut.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultFilterOut(self, value):
		"""
		Setter for **self.__defaultFilterOut** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
			"defaultFilterOut", value)
		self.__defaultFilterOut = value

	@defaultFilterOut.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultFilterOut(self):
		"""
		Deleter for **self.__defaultFilterOut** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultFilterOut"))

	@property
	def filtersOutFormat(self):
		"""
		Property for **self.__filtersOutFormat** attribute.

		:return: self.__filtersOutFormat.
		:rtype: unicode
		"""

		return self.__filtersOutFormat

	@filtersOutFormat.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def filtersOutFormat(self, value):
		"""
		Setter for **self.__filtersOutFormat** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
			"filtersOutFormat", value)
		self.__filtersOutFormat = value

	@filtersOutFormat.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def filtersOutFormat(self):
		"""
		Deleter for **self.__filtersOutFormat** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "filtersOutFormat"))

	@property
	def defaultTarget(self):
		"""
		Property for **self.__defaultTarget** attribute.

		:return: self.__defaultTarget.
		:rtype: unicode
		"""

		return self.__defaultTarget

	@defaultTarget.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultTarget(self, value):
		"""
		Setter for **self.__defaultTarget** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
			"defaultTarget", value)
		self.__defaultTarget = value

	@defaultTarget.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultTarget(self):
		"""
		Deleter for **self.__defaultTarget** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultTarget"))

	@property
	def targetsFormat(self):
		"""
		Property for **self.__targetsFormat** attribute.

		:return: self.__targetsFormat.
		:rtype: unicode
		"""

		return self.__targetsFormat

	@targetsFormat.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def targetsFormat(self, value):
		"""
		Setter for **self.__targetsFormat** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
			"targetsFormat", value)
		self.__targetsFormat = value

	@targetsFormat.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def targetsFormat(self):
		"""
		Deleter for **self.__targetsFormat** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "targetsFormat"))

	@property
	def defaultLineNumberWidth(self):
		"""
		Property for **self.__defaultLineNumberWidth** attribute.

		:return: self.__defaultLineNumberWidth.
		:rtype: int
		"""

		return self.__defaultLineNumberWidth

	@defaultLineNumberWidth.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultLineNumberWidth(self, value):
		"""
		Setter for **self.__defaultLineNumberWidth** attribute.

		:param value: Attribute value.
		:type value: int
		"""

		if value is not None:
			assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format(
			"defaultLineNumberWidth", value)
			assert value > 0, "'{0}' attribute: '{1}' needs to be strictly positive!".format("defaultLineNumberWidth", value)
		self.__defaultLineNumberWidth = value

	@defaultLineNumberWidth.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultLineNumberWidth(self):
		"""
		Deleter for **self.__defaultLineNumberWidth** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultLineNumberWidth"))

	@property
	def defaultLineColor(self):
		"""
		Property for **self.__defaultLineColor** attribute.

		:return: self.__defaultLineColor.
		:rtype: QColor
		"""

		return self.__defaultLineColor

	@defaultLineColor.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultLineColor(self, value):
		"""
		Setter for **self.__defaultLineColor** attribute.

		:param value: Attribute value.
		:type value: QColor
		"""

		if value is not None:
			assert type(value) is QColor, "'{0}' attribute: '{1}' type is not 'QColor'!".format("defaultLineColor", value)
		self.__defaultLineColor = value

	@defaultLineColor.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultLineColor(self):
		"""
		Deleter for **self.__defaultLineColor** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultLineColor"))

	@property
	def ignoreHiddenFiles(self):
		"""
		Property for **self.__ignoreHiddenFiles** attribute.

		:return: self.__ignoreHiddenFiles.
		:rtype: bool
		"""

		return self.__ignoreHiddenFiles

	@ignoreHiddenFiles.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def ignoreHiddenFiles(self, value):
		"""
		Setter for **self.__ignoreHiddenFiles** attribute.

		:param value: Attribute value.
		:type value: bool
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "ignoreHiddenFiles"))

	@ignoreHiddenFiles.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def ignoreHiddenFiles(self):
		"""
		Deleter for **self.__ignoreHiddenFiles** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "ignoreHiddenFiles"))

	@property
	def searchWorkerThread(self):
		"""
		Property for **self.__searchWorkerThread** attribute.

		:return: self.__searchWorkerThread.
		:rtype: QThread
		"""

		return self.__searchWorkerThread

	@searchWorkerThread.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def searchWorkerThread(self, value):
		"""
		Setter for **self.__searchWorkerThread** attribute.

		:param value: Attribute value.
		:type value: QThread
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "searchWorkerThread"))

	@searchWorkerThread.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def searchWorkerThread(self):
		"""
		Deleter for **self.__searchWorkerThread** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "searchWorkerThread"))

	#******************************************************************************************************************
	#***	Class methods
	#******************************************************************************************************************
	def show(self):
		"""
		Reimplements the :meth:`QWidget.show` method.
		"""

		selectedText = self.__container.getCurrentEditor().getSelectedText()
		selectedText and SearchAndReplace.insertPattern(selectedText, self.__searchPatternsModel)
		self.Search_comboBox.lineEdit().selectAll()
		self.Search_comboBox.setFocus()

		super(SearchInFiles, self).show()
		self.raise_()

	def closeEvent(self, event):
		"""
		Reimplements the :meth:`QWidget.closeEvent` method.

		:param event: QEvent.
		:type event: QEvent
		"""

		self.__interruptSearch()
		super(SearchInFiles, self).closeEvent(event)

	def __initializeUi(self):
		"""
		Initializes the Widget ui.
		"""

		umbra.ui.common.setWindowDefaultIcon(self)

		self.__model = SearchResultsModel(self)
		self.__delegate = RichText_QStyledItemDelegate(self)

		self.Search_Results_treeView.setParent(None)
		self.Search_Results_treeView = SearchResults_QTreeView(self,
															self.__model,
															message="No Search Result to view!")
		self.Search_Results_treeView.setItemDelegate(self.__delegate)
		self.Search_Results_treeView.setObjectName("Search_Results_treeView")
		self.Search_Results_frame_gridLayout.addWidget(self.Search_Results_treeView, 0, 0)
		self.__view = self.Search_Results_treeView
		self.__view.setContextMenuPolicy(Qt.ActionsContextMenu)
		self.__view_addActions()

		self.__searchPatternsModel = self.__container.searchAndReplace.searchPatternsModel
		self.Search_comboBox.setModel(self.__container.searchAndReplace.searchPatternsModel)
		self.Search_comboBox.setInsertPolicy(QComboBox.InsertAtTop)
		self.Search_comboBox.completer().setCaseSensitivity(Qt.CaseSensitive)

		self.__replaceWithPatternsModel = self.__container.searchAndReplace.replaceWithPatternsModel
		self.Replace_With_comboBox.setModel(self.__container.searchAndReplace.replaceWithPatternsModel)
		self.Replace_With_comboBox.setInsertPolicy(QComboBox.InsertAtTop)
		self.Replace_With_comboBox.completer().setCaseSensitivity(Qt.CaseSensitive)

		self.Where_lineEdit.setParent(None)
		self.Where_lineEdit = Search_QLineEdit(self)
		self.Where_lineEdit.setObjectName("Where_lineEdit")
		self.Where_frame_gridLayout.addWidget(self.Where_lineEdit, 0, 0)
		self.__locationsMenu = QMenu()
		for title, location in self.__locations.iteritems():
			self.__locationsMenu.addAction(self.__container.engine.actionsManager.registerAction(
			"Actions|Umbra|Components|factory.scriptEditor|Search In Files|{0}".format(title),
			text="{0}".format(title),
			slot=functools.partial(self.__addLocation, location)))
		self.Where_lineEdit.searchActiveLabel.setMenu(self.__locationsMenu)
		self.Where_lineEdit.setPlaceholderText("Use the magnifier to add locations!")

		self.installEventFilter(ValidationFilter(self))

		# Signals / Slots.
		self.__view.selectionModel().selectionChanged.connect(self.__view_selectionModel__selectionChanged)
		self.__view.doubleClicked.connect(self.__view__doubleClicked)
		self.__searchPatternsModel.patternInserted.connect(functools.partial(
		self.__patternsModel__patternInserted, self.Search_comboBox))
		self.__replaceWithPatternsModel.patternInserted.connect(functools.partial(
		self.__patternsModel__patternInserted, self.Replace_With_comboBox))
		self.Search_pushButton.clicked.connect(self.__Search_pushButton__clicked)
		self.Close_pushButton.clicked.connect(self.__Close_pushButton__clicked)

	def __view_addActions(self):
		"""
		Sets the View actions.
		"""

		self.__view.addAction(self.__container.engine.actionsManager.registerAction(
		"Actions|Umbra|Components|factory.scriptEditor|Search In Files|Replace All",
		slot=self.__view_replaceAllAction__triggered))
		self.__view.addAction(self.__container.engine.actionsManager.registerAction(
		"Actions|Umbra|Components|factory.scriptEditor|Search In Files|Replace Selected",
		slot=self.__view_replaceSelectedAction__triggered))
		separatorAction = QAction(self.__view)
		separatorAction.setSeparator(True)
		self.__view.addAction(separatorAction)
		self.__view.addAction(self.__container.engine.actionsManager.registerAction(
		"Actions|Umbra|Components|factory.scriptEditor|Search In Files|Save All",
		slot=self.__view_saveAllAction__triggered))
		self.__view.addAction(self.__container.engine.actionsManager.registerAction(
		"Actions|Umbra|Components|factory.scriptEditor|Search In Files|Save Selected",
		slot=self.__view_saveSelectedAction__triggered))

	def __view_replaceAllAction__triggered(self, checked):
		"""
		Defines the slot triggered by **'Actions|Umbra|Components|factory.scriptEditor|Search In Files|Replace All'** action.

		:param checked: Action checked state.
		:type checked: bool
		:return: Method success.
		:rtype: bool
		"""

		allNodes = filter(lambda x: x.family in ("SearchFile", "SearchOccurence"), self.__model.rootNode.children)
		if allNodes:
			return self.replace(allNodes)

	def __view_replaceSelectedAction__triggered(self, checked):
		"""
		Defines the slot triggered by **'Actions|Umbra|Components|factory.scriptEditor|Search In Files|Replace Selected'** action.

		:param checked: Action checked state.
		:type checked: bool
		:return: Method success.
		:rtype: bool
		"""

		selectedNodes = filter(lambda x: x.family in ("SearchFile", "SearchOccurence"), self.__view.getSelectedNodes())
		if selectedNodes:
			return self.replace(filter(lambda x: x.parent not in selectedNodes, selectedNodes))

	def __view_saveAllAction__triggered(self, checked):
		"""
		Defines the slot triggered by **'Actions|Umbra|Components|factory.scriptEditor|Search In Files|Save All'** action.

		:param checked: Action checked state.
		:type checked: bool
		:return: Method success.
		:rtype: bool
		"""

		allNodes = filter(lambda x: x.family == "ReplaceResult", self.__model.rootNode.children)
		if allNodes:
			return self.saveFiles(allNodes)

	def __view_saveSelectedAction__triggered(self, checked):
		"""
		Defines the slot triggered by **'Actions|Umbra|Components|factory.scriptEditor|Search In Files|Save Selected'** action.

		:param checked: Action checked state.
		:type checked: bool
		:return: Method success.
		:rtype: bool
		"""

		selectedNodes = filter(lambda x: x.family == "ReplaceResult", self.__view.getSelectedNodes())
		if selectedNodes:
			return self.saveFiles(selectedNodes)

	def __patternsModel__patternInserted(self, comboBox, index):
		"""
		Defines the slot triggered by a pattern when inserted into a patterns Model.

		:param comboBox: Pattern Model attached comboBox.
		:type comboBox: QComboBox
		:param index: Inserted pattern index.
		:type index: QModelIndex
		"""

		comboBox.setCurrentIndex(index.row())

	def __Search_pushButton__clicked(self, checked):
		"""
		Defines the slot triggered by **Search_pushButton** Widget when clicked.

		:param checked: Checked state.
		:type checked: bool
		"""

		self.search()

	def __Close_pushButton__clicked(self, checked):
		"""
		Defines the slot triggered by **Close_pushButton** Widget when clicked.

		:param checked: Checked state.
		:type checked: bool
		"""

		self.close()

	def __view__doubleClicked(self, index):
		"""
		Defines the slot triggered by a View when double clicked.

		:param index: Clicked item index.
		:type index: QModelIndex
		"""

		node = self.__model.getNode(index)

		if node.family == "SearchOccurence":
			file = node.parent.file
			occurence = node
		elif node.family in ("SearchFile", "ReplaceResult"):
			file = node.file
			occurence = None

		self.__highlightOccurence(file, occurence)

	def __view_selectionModel__selectionChanged(self, selectedItems, deselectedItems):
		"""
		Defines the slot triggered by the View **selectionModel** when selection changed.

		:param selectedItems: Selected items.
		:type selectedItems: QItemSelection
		:param deselectedItems: Deselected items.
		:type deselectedItems: QItemSelection
		"""

		indexes = selectedItems.indexes()
		if not indexes:
			return

		node = self.__model.getNode(indexes.pop())

		if node.family == "SearchOccurence":
			file = node.parent.file
			occurence = node
		elif node.family in ("SearchFile", "ReplaceResult"):
			file = node.file
			occurence = None

		if self.__container.getEditor(file):
			self.__highlightOccurence(file, occurence)

	def __searchWorkerThread__searchFinished(self, searchResults):
		"""
		Defines the slot triggered by :attr:`SearchInFiles.searchWorkerThread` attribute worker thread
		when the search is finished.

		:param searchResults: Search results.
		:type searchResults: list
		"""

		self.setSearchResults(searchResults)

		self.__container.engine.stopProcessing()
		metrics = self.__model.getMetrics()
		self.__container.engine.notificationsManager.notify(
		"{0} | '{1}' pattern occurence(s) found in '{2}' files!".format(self.__class__.__name__,
																	metrics["SearchOccurence"],
																	metrics["SearchFile"]))

	def __addLocation(self, type, *args):
		"""
		Defines the slot triggered by **Where_lineEdit** Widget when a context menu entry is clicked.

		:param type: Location type.
		:type type: unicode
		:param \*args: Arguments.
		:type \*args: \*
		"""

		if type == "directory":
			location = umbra.ui.common.storeLastBrowsedPath((QFileDialog.getExistingDirectory(self,
																						"Add Directory:",
																						RuntimeGlobals.lastBrowsedPath)))
		elif type == "file":
			location = umbra.ui.common.storeLastBrowsedPath((QFileDialog.getOpenFileName(self,
																						"Add File:",
																						RuntimeGlobals.lastBrowsedPath,
																						"All Files (*)")))
		elif type == "editors":
			location = self.__targetsFormat.format(self.__defaultTarget)
		elif type == "includeFilter":
			location = self.__filtersInFormat.format(self.__defaultFilterIn)
		elif type == "excludeFilter":
			location = self.__filtersOutFormat.format(self.__defaultFilterOut)

		location and self.Where_lineEdit.setText(", ".join(filter(bool, (foundations.strings.toString(
		self.Where_lineEdit.text()), location))))

	def __formatOccurence(self, occurence):
		"""
		Formats the given occurence and returns the matching rich html text.

		:param occurence: Occurence to format.
		:type occurence: Occurence
		:return: Rich text.
		:rtype: unicode
		"""

		color = "rgb({0}, {1}, {2})"
		spanFormat = "<span style=\"color: {0};\">{{0}}</span>".format(color.format(self.__defaultLineColor.red(),
																					self.__defaultLineColor.green(),
																					self.__defaultLineColor.blue()))
		line = foundations.strings.toString(occurence.text)
		start = spanFormat.format(line[:occurence.column])
		pattern = "<b>{0}</b>".format(line[occurence.column:occurence.column + occurence.length])
		end = spanFormat.format(line[occurence.column + occurence.length:])
		return "".join((start, pattern, end))

	def __formatReplaceMetrics(self, file, metrics):
		"""
		Formats the given replace metrics and returns the matching rich html text.

		:param file: File.
		:type file: unicode
		:param metrics: Replace metrics to format.
		:type metrics: unicode
		:return: Rich text.
		:rtype: unicode
		"""

		color = "rgb({0}, {1}, {2})"
		spanFormat = "<span style=\"color: {0};\">{{0}}</span>".format(color.format(self.__defaultLineColor.red(),
																					self.__defaultLineColor.green(),
																					self.__defaultLineColor.blue()))
		dirName, baseName = (os.path.dirname(file), os.path.basename(file))

		return "".join((spanFormat.format("'"),
						spanFormat.format(dirName),
						spanFormat.format(os.path.sep),
						baseName,
						spanFormat.format("' file: '"),
						foundations.strings.toString(metrics),
						spanFormat.format("' occurence(s) replaced!")))

	def __highlightOccurence(self, file, occurence):
		"""
		Highlights given file occurence.

		:param file: File containing the occurence.
		:type file: unicode
		:param occurence: Occurence to highlight.
		:type occurence: Occurence or SearchOccurenceNode
		"""

		if not self.__container.getEditor(file):
			cacheData = self.__filesCache.getContent(file)
			if cacheData:
				document = cacheData.document or self.__getDocument(cacheData.content)
				self.__container.loadDocument(document, file)
				self.__uncache(file)
			else:
				self.__container.loadFile(file)
		else:
			self.__container.setCurrentEditor(file)

		if not occurence:
			return

		cursor = self.__container.getCurrentEditor().textCursor()
		cursor.setPosition(occurence.position, QTextCursor.MoveAnchor)
		cursor.setPosition(occurence.position + occurence.length, QTextCursor.KeepAnchor)
		self.__container.getCurrentEditor().setTextCursor(cursor)

	def __getDocument(self, content):
		"""
		Returns a `QTextDocument <http://doc.qt.nokia.com/qtextdocument.html>`_ class instance
		with given content.

		:param content: Document content.
		:type content: unicode
		:return: Document.
		:rtype: QTextDocument
		"""

		document = QTextDocument(QString(content))
		document.clearUndoRedoStacks()
		document.setModified(False)
		return document

	def __replaceWithinDocument(self, document, occurrences, replacementPattern):
		"""
		Replaces given pattern occurrences in the given document with the given replacement pattern.

		:param document: Document.
		:type document: QTextDocument
		:param occurrences: Occurrences to replace.
		:type occurrences: list
		:param replacementPattern: Replacement pattern.
		:type replacementPattern: unicode
		:return: Replaced occurrences count.
		:rtype: int
		"""

		cursor = QTextCursor(document)
		cursor.beginEditBlock()
		offset = count = 0
		for occurence in sorted(occurrences, key=lambda x: x.position):
			cursor.setPosition(offset + occurence.position, QTextCursor.MoveAnchor)
			cursor.setPosition(offset + occurence.position + occurence.length, QTextCursor.KeepAnchor)
			cursor.insertText(replacementPattern)
			offset += len(replacementPattern) - occurence.length
			count += 1
		cursor.endEditBlock()
		return count

	def __getSettings(self):
		"""
		Returns the current search and replace settings.

		:return: Settings.
		:rtype: dict
		"""

		return {"caseSensitive" : self.Case_Sensitive_checkBox.isChecked(),
				"wholeWord" : self.Whole_Word_checkBox.isChecked(),
				"regularExpressions" : self.Regular_Expressions_checkBox.isChecked()}

	def __interruptSearch(self):
		"""
		Interrupts the current search.
		"""

		if self.__searchWorkerThread:
			self.__searchWorkerThread.quit()
			self.__searchWorkerThread.wait()
			self.__container.engine.stopProcessing(warning=False)

	def __cache(self, file, content, document):
		"""
		Caches given file.

		:param file: File to cache.
		:type file: unicode
		:param content: File content.
		:type content: list
		:param document: File document.
		:type document: QTextDocument
		"""

		self.__filesCache.addContent(**{file : CacheData(content=content, document=document)})

	def __uncache(self, file):
		"""
		Uncaches given file.

		:param file: File to uncache.
		:type file: unicode
		"""

		if file in self.__filesCache:
			self.__filesCache.removeContent(file)

	def setSearchResults(self, searchResults):
		"""
		Sets the Model Nodes using given search results.

		:param searchResults: Search results.
		:type searchResults: list
		:return: Method success.
		:rtype: bool
		"""

		rootNode = umbra.ui.nodes.DefaultNode(name="InvisibleRootNode")
		for searchResult in searchResults:
			searchFileNode = SearchFileNode(name=searchResult.file,
											parent=rootNode)
			searchFileNode.update(searchResult)
			width = \
			max(self.__defaultLineNumberWidth,
			max([len(foundations.strings.toString(occurence.line)) for occurence in searchResult.occurrences]))
			for occurence in searchResult.occurrences:
				formatter = "{{0:>{0}}}".format(width)
				name = "{0}:{1}".format(formatter.format(occurence.line + 1).replace(" ", "&nbsp;"),
										self.__formatOccurence(occurence))
				searchOccurenceNode = SearchOccurenceNode(name=name,
														parent=searchFileNode)
				searchOccurenceNode.update(occurence)
		self.__model.initializeModel(rootNode)
		return True

	def setReplaceResults(self, replaceResults):
		"""
		Sets the Model Nodes using given replace results.

		:param replaceResults: Replace results.
		:type replaceResults: list
		:return: Method success.
		:rtype: bool
		"""

		rootNode = umbra.ui.nodes.DefaultNode(name="InvisibleRootNode")
		for file, metrics in sorted(replaceResults.iteritems()):
			replaceResultNode = ReplaceResultNode(name=self.__formatReplaceMetrics(file, metrics),
												parent=rootNode,
												file=file)
		self.__model.initializeModel(rootNode)
		return True

	def search(self):
		"""
		Searches user defined locations for the search pattern.

		:return: Method success.
		:rtype: bool
		"""

		self.__interruptSearch()

		searchPattern = self.Search_comboBox.currentText()
		replacementPattern = self.Replace_With_comboBox.currentText()
		if not searchPattern:
			return False

		SearchAndReplace.insertPattern(searchPattern, self.__searchPatternsModel)
		SearchAndReplace.insertPattern(replacementPattern, self.__replaceWithPatternsModel)

		location = umbra.ui.common.parseLocation(
		foundations.strings.toString(self.Where_lineEdit.text()) or \
		self.__targetsFormat.format(self.__defaultTarget))
		self.__ignoreHiddenFiles and location.filtersOut.append("\\\.|/\.")

		settings = self.__getSettings()

		self.__searchWorkerThread = Search_worker(self, searchPattern, location, settings)
		# Signals / Slots.
		self.__searchWorkerThread.searchFinished.connect(self.__searchWorkerThread__searchFinished)

		self.__container.engine.workerThreads.append(self.__searchWorkerThread)
		self.__container.engine.startProcessing("Searching In Files ...")
		self.__searchWorkerThread.start()
		return True

	def replace(self, nodes):
		"""
		Replaces search pattern occurrences with the replacement pattern in the files associated with the given nodes.

		:param nodes: Nodes.
		:type nodes: list
		:return: Method success.
		:rtype: bool
		"""

		files = {}
		for node in nodes:
			if node.family == "SearchFile":
				files[node.file] = node.children
			elif node.family == "SearchOccurence":
				file = node.parent.file
				if not file in files:
					files[file] = []
				files[file].append(node)

		replacementPattern = self.Replace_With_comboBox.currentText()
		SearchAndReplace.insertPattern(replacementPattern, self.__replaceWithPatternsModel)

		replaceResults = {}
		for file, occurrences in files.iteritems():
			editor = self.__container.getEditor(file)
			if editor:
				document = editor.document()
			else:
				cacheData = self.__filesCache.getContent(file)
				if cacheData is None:
					LOGGER.warning(
					"!> {0} | '{1}' file doesn't exists in files cache!".format(self.__class__.__name__, file))
					continue

				content = self.__filesCache.getContent(file).content
				document = self.__getDocument(content)
				self.__cache(file, content, document)
			replaceResults[file] = self.__replaceWithinDocument(document, occurrences, replacementPattern)

		self.setReplaceResults(replaceResults)
		self.__container.engine.notificationsManager.notify(
		"{0} | '{1}' pattern occurence(s) replaced in '{2}' files!".format(self.__class__.__name__,
																	sum(replaceResults.values()),
																	len(replaceResults.keys())))
		return True

	def saveFiles(self, nodes):
		"""
		Saves user defined files using given nodes.

		:param nodes: Nodes.
		:type nodes: list
		:return: Method success.
		:rtype: bool
		"""

		metrics = {"Opened" : 0, "Cached" : 0}
		for node in nodes:
			file = node.file
			if self.__container.getEditor(file):
				if self.__container.saveFile(file):
					metrics["Opened"] += 1
					self.__uncache(file)
			else:
				cacheData = self.__filesCache.getContent(file)
				if cacheData is None:
					LOGGER.warning(
					"!> {0} | '{1}' file doesn't exists in files cache!".format(self.__class__.__name__, file))
					continue

				if cacheData.document:
					fileHandle = File(file)
					fileHandle.content = [cacheData.document.toPlainText().toUtf8()]
					if fileHandle.write():
						metrics["Cached"] += 1
						self.__uncache(file)
				else:
					LOGGER.warning(
					"!> {0} | '{1}' file document doesn't exists in files cache!".format(self.__class__.__name__, file))

		self.__container.engine.notificationsManager.notify(
		"{0} | '{1}' opened file(s) and '{2}' cached file(s) saved!".format(self.__class__.__name__,
																		metrics["Opened"],
																		metrics["Cached"]))
class FileSystemcPickleArtifact(fsa.FileSystemJsonArtifact):
    """Artifact which persists data by writing to the file system and using
    cjson for serializing metadata"""

    def meta_filename(self):
        return "%s-meta.pickle" % (self.hashstring)

    def cached_output_filename(self):
        return "%s-output.pickle" % (self.hashstring)

    def save_meta(self):
        m = {}
        attrs_to_persist = set(self.META_ATTRS + self.HASH_WHITELIST) - set(['input_data_dict', 'inputs'])
        for a in attrs_to_persist:
            if hasattr(self, a):
                v = getattr(self, a)
                m[a] = v

        m['inputs'] = {}
        for k, a in self.inputs().iteritems():
            a.save()
            m['inputs'][k] = a.hashstring

        with open(self.meta_filepath(), "w") as f:
            pickle.dump(m, f)

    def load_meta(self):
        with open(self.meta_filepath(), "r") as f:
            m = pickle.load(f)

        self._inputs = dict((k, self.__class__.retrieve(h)) for (k, h) in m.pop('inputs').iteritems())

        for k, v in m.iteritems():
            setattr(self, k, v)

        # We only store filter name, not filter class, need to retrieve class from name
        if hasattr(self, "filter_name") and not hasattr(self, "filter_class"):
            self.filter_class = [k for n,k in self.FILTERS.iteritems() if k.__name__ == self.filter_name][0]

    def save_output(self):
        if not self.is_complete():
            raise Exception("should not be calling save_output unless artifact is complete")

        if not self.binary_output:
            if not self.data_dict or len(self.data_dict) == 0:
                # Our filter has written directly to an output file
                # We need to load this into memory first
                self.data_dict = OrderedDict()
                f = open(self.filepath(), 'r')
                data = f.read()
                f.close()
                self.data_dict['1'] = data

            # need to preserve ordering; as in the parent JSON artifact class,
            # sortable numbers are prefixed to the keys so order can be restored on load
            data_dict = {}
            MAX = 10000
            if len(self.data_dict) >= MAX:
                raise Exception("""There is an arbitrary limit of %s dict items,
                               you can increase this if you need to.""" % MAX)
            i = -1
            for k, v in self.data_dict.iteritems():
                i += 1
                data_dict["%04d:%s" % (i, k)] = v

            # Write the data file.
            with open(self.cached_output_filepath(), "w") as f:
                pickle.dump(data_dict, f)

            # Write the canonical file.
            if not os.path.exists(self.filepath()):
                with open(self.filepath(), 'w') as f:
                    f.write(self.output_text())

    def load_output(self):
        if not self.is_complete():
            raise Exception("should not be calling load_output unless artifact is complete")

        if not self.binary_output:
            f = open(self.cached_output_filepath(), "r")
            data_dict = pickle.load(f)
            f.close()

            self.data_dict = OrderedDict() # maybe unnecessary
            for x in sorted(data_dict.keys()):
                k = x.split(":", 1)[1]
                self.data_dict[k] = data_dict[x]
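
# Editor's sketch (illustrative only, not taken from the project above): save_output()
# and load_output() keep the OrderedDict ordering through a plain dict by prefixing
# each key with a zero-padded counter, then sorting and stripping the prefix on load.
# The same round trip in isolation:
from collections import OrderedDict

def flatten_ordered(data_dict):
    flat = {}
    for i, (k, v) in enumerate(data_dict.items()):
        flat["%04d:%s" % (i, k)] = v
    return flat

def restore_ordered(flat):
    restored = OrderedDict()
    for prefixed in sorted(flat.keys()):
        restored[prefixed.split(":", 1)[1]] = flat[prefixed]
    return restored

original = OrderedDict([("intro", "..."), ("body", "..."), ("footer", "...")])
assert restore_ordered(flatten_ordered(original)) == original
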
Exemple #20
0
class Dependency:
    ## Construct an empty dependency tree
    #  @param self the object reference
    #  @param silent  minimal feedback
    #  @param autobuild  warn rather than fail on multiple version dependencies. XXX
    def __init__(self, silent=True, autobuild=False):
        ## The ASKAP top-level directory
        self.ASKAPROOT = os.environ.get("ASKAP_ROOT")
        if self.ASKAPROOT is None:
            msg = "ASKAP_ROOT environment variable is not defined"
            raise BuildError(msg)
        #
        self.DEPFILE = "dependencies" # The basename of the dependency file
        self.INSTALL_SUBDIR = "install"
        self._deps = OrderedDict()
        #
        self._bindirs = []
        self._incdirs = []
        self._libdirs = []
        self._rootdirs = []
        #
        self._cppflags = []  # XXX "defs" in package.info. LOFAR/log4cxx
        #
        self._env = []
        self._jars = []
        self._libs = []
        self._packages = []
        #
        self._ldlibpath = ""
        self._pypath = ""
        #
        self._autobuild = autobuild
        self._silent = silent   # minimal output
        self.selfupdate = False # should object request updates from svn


    def q_print(self, msg):
        if self._silent:
            return
        utils.q_print(msg)


    ## Get the path of the specified dependency package
    # @param self the current object
    # @param key the label of the package dependency
    # @return the path (relative to ASKAP_ROOT) to the package
    def get_dep_path(self, key):
        return self._deps[key]["path"]


    # Used by "in" test.
    # object.__contains__(self, item)
    #
    # Called to implement membership test operators. Should return true if item
    # is in self, false otherwise. For mapping objects, this should consider
    # the keys of the mapping rather than the values or the key-item pairs.
    #
    # For objects that do not define __contains__(), the membership test first
    # tries iteration via __iter__(), then the old sequence iteration protocol
    # via __getitem__(), see this section in the language reference.
    #
    # http://docs.python.org/reference/datamodel.html

    def __contains__(self, key):
        return self._deps.has_key(key)


    ## Get the absolute path to the dependency packages installed location
    # @param self the current object
    # @param key the label of the package dependency
    # @return the absolute path to the package installed location
    def get_install_path(self, key):
        rel_path  = self._deps[key]["path"]
        full_path = os.path.join(self.ASKAPROOT, rel_path, self.INSTALL_SUBDIR)
        return os.path.abspath(full_path)

    def get_path(self):
        return os.path.pathsep.join(self._bindirs)

    ## Get the libraries retrieved in the dependency analysis
    #  @param self the object reference
    #  @param mapped return (library, installdir) tuples rather than just names
    #  @return a list of library names
    def get_libs(self, mapped=False):
        if mapped:
            return self._libs[:]
        else:
            return [ m[0] for m in self._libs ]


    ## Get the environment variables retrieved in the dependency analysis
    #  @param self the object reference
    #  @return a dictionary of ENVVAR => value pairs
    def get_env(self):
        return dict([i.split("=") for i in self._env])

    ## Get the java classpath for the dependencies
    #  @param self the object reference
    #  @return a classpath string of the form x/y/z.jar:a/b/c.jar
    def get_classpath(self):
        return os.path.pathsep.join(self._jars)

    ## Get the root directories of the tags retrieved in the dependency analysis
    #  @param self the object reference
    #  @return a list of directory names
    def get_rootdirs(self, mapped=False): # XXX used in ant.py builder with mapped=true.
        if mapped:
            return [ (k, os.path.join( self.ASKAPROOT, v['path'])) \
                for k,v in self._deps.iteritems()]
        return self._rootdirs[:]


    ## Get the LIBRARY directories retrieved in the dependency analysis
    #  @param self the object reference
    #  @param mapped return directory tuples (rootdir, libdir)
    #  @return a list of library directories or tuples of rootdirs and libdirs
    def get_librarydirs(self, mapped=False):
        if mapped:
            return self._libdirs[:]
        else:
            return [ m[0] for m in self._libdirs ]


    ## Get the LD_LIBRARY_PATH accumulated in the dependency analysis
    #  @param self the object reference
    #  @return a string representing the LD_LIBRARY_PATH
    def get_ld_library_path(self):
        return self._ldlibpath.strip(":")


    ## Get the INCLUDE directories retrieved in the dependency analysis
    #  @param self the object reference
    #  @return a list of header file directories
    def get_includedirs(self):
        return self._incdirs[:]


    ## Get the CPPFLAGS retrieved in the dependency analysis
    #  @param self the object reference
    #  @return a list of preprocessor flags
    def get_cppflags(self):
        return self._cppflags[:]


    def get_pythonpath(self):
        return self._pypath.strip(":")


    ## Get a list of doxygen tag files in the dependencies. This is used for
    #  cross-referencing the documentation
    #  @todo Re-enable: This has been disabled until it is working for python
    #  @param self the object reference
    #  @return a list of TAGFILES entries
    # XXX used only in scons_tools/askap_package.py
    def get_tagfiles(self):
        tagfiles = []
        for pth in self._rootdirs:
            tagname = utils.tag_name(pth)
            tagpath = os.path.join(pth, tagname)
            if os.path.exists(tagpath):
                tagfiles.append('"%s=%s/html"' % (tagpath, pth) )
        return tagfiles


    def _get_dependencies(self, package):
        codename = utils.get_platform()['codename']
        hostname = socket.gethostname().split(".")[0]

        for ext in ['default', codename, hostname]:
            if ext:
                depfile = '%s.%s' % (self.DEPFILE, ext)
                if package:
                    depfile = os.path.join(self.ASKAPROOT, package, depfile)
                if self.selfupdate:
                    # always update if it is the "root/target" package
                    basedir = os.path.split(depfile)[0] or "."
                    if not os.path.exists(basedir):
                        utils.update_tree(basedir)
                self._get_depfile(depfile)


    def _get_depfile(self, depfile, overwrite=False):
        if not os.path.exists(depfile):
            # assume no dependencies
            return
        dfh = file(depfile)
        for line in dfh.readlines():
            line = line.strip()
            if line.startswith("#"): continue
            kv = line.split("=", 1)
            if len(kv) == 2:
                key = kv[0].strip()
                value = kv[1].strip()
                # see if the file explicitly names any libs
                lspl = value.split(";")
                libs = None
                if len(lspl) > 1:
                    libs = lspl[1].strip().split()
                value = lspl[0]
                self._add_dependency(key, value, libs, overwrite)
                if not value.startswith("/"):
                    # recurse into ASKAP dependencies
                    # otherwise just move on as we specified system dependency
                    # which will not have a dependency file
                    self._packages.append(value)
                    self._get_dependencies(value)

        dfh.close()


    def _get_info(self, packagedir):
        info =  {
                # A single directory path relative to the install directory.
                'bindir':  'bin',
                'distdir': 'dist',
                'incdir':  'include',
                'libdir':  'lib',
                # Space separated lists. XXX Default should be '[]'?
                'defs' :    None,
                'env':      None,
                'jars':     None,
                'libs':     None,
                # Define a single python module name and version.
                # e.g. pymodule=numpy==1.2.0
                'pymodule': None,
                }
        sslists = ['defs', 'env', 'jars', 'libs']
        infofile = os.path.join(packagedir, 'package.info')

        if os.path.exists(infofile):
            f = file(infofile)
            for line in f.readlines():
                line = line.strip()
                if line.startswith("#"): continue
                kv = line.split("=", 1)
                if len(kv) == 2:
                    key = kv[0].strip()
                    value = kv[1].strip()
                    if key in info.keys():
                        if key in sslists:
                            info[key] = value.split()
                        else:
                            info[key] = value
            f.close()
        return info


    def _add_dependency(self, key, value, libs, overwrite=False):
        if self._deps.has_key(key):
            # deal with potential symbolic links for 'default' packages
            paths = [self._deps[key]["path"], value]
            outpaths = []
            for pth in paths:
                if not pth.startswith("/"):
                    pth = os.path.join(os.environ["ASKAP_ROOT"], pth)
                pth = os.path.realpath(pth)
                outpaths.append(pth)
            if outpaths[0] == outpaths[1]:
                if libs:
                    if self._deps[key]["libs"] is not None:
                        # prepend the libs
                        self._deps[key]["libs"] = libs + self._deps[key]["libs"]
                    else:
                        self._deps[key]["libs"] = libs
                    self._deps.toend(key)
                else:
                    # another dependency, so move it to the end, so link
                    # order is correct
                    self._deps.toend(key)
                return
            else:
                if overwrite:
                    self._deps[key]["path"] = value
                    self.q_print("info: Overwriting default package dependency '%s' with host specific package (from %s)" % (key, value) )
                elif self._autobuild: # XXX maybe a mistake?
                    self.q_print("warn: Possible multiple version dependency \n\
                    %s != %s" % (self._deps[key]["path"], value))

                else:
                    raise BuildError("Multiple version dependency \n\
                    %s != %s" % (self._deps[key]["path"], value))
        else:
            self.q_print("info: Adding package dependency '%s' (from %s)" %
                          (key, value))
            # now update the dependency itself
            # XXX only used in Tools/scons_tools/askap_package.py
            if self.selfupdate:
                utils.update_tree(value)
            self._deps[key] = {"path": value, "libs": libs}


    def _remove_duplicates(self, values):
        # find unique elements
        libs = [v[0] for v in values]
        for k in set(libs):
            # remove all but last duplicate entry
            while libs.count(k) > 1:
                idx = libs.index(k)
                libs.pop(idx)
                values.pop(idx)

    ## Add a ThirdPartyLibrary or ASKAP package to the environment
    #  This will add the package path in ASKAP_ROOT
    #  @param self the object reference
    #  @param pkgname The name of the package as in the repository, e.g.
    #  lapack. Default None means that this is defined in local
    #  dependencies.xyz
    #  @param tag The location of the package, e.g.
    #  3rdParty/lapack-3.1.1/lapack-3.1.1
    #  @param libs The name of the libraries to link against,
    #  default None is the same as the pkgname
    #  @param libdir The location of the library dir relative to the package,
    #  default None which will use settings in the package.info file
    #  @param incdir The location of the include dir relative to the package,
    #  default None which will use settings in the package.info file
    #  @param pymodule the 'require' statement to specify this dependency
    #  statement, e.g. "askap.loghandlers==current"
    def add_package(self, pkgname=None, tag=None,
                    libs=None, libdir=None, incdir=None, bindir=None,
                    pymodule=None):
        self._deps = OrderedDict()

        if pkgname:
            if not tag:
                BuildError("No tag specified")
            if self.selfupdate:
                #if not os.path.exists(tag):
                utils.update_tree(tag)
            self._add_path(pkgname, self.ASKAPROOT, tag, libs, libdir,
                          incdir, bindir, pymodule)
            self.q_print("info: Adding package '%s'" % pkgname)

        if tag:
            tag = os.path.join(self.ASKAPROOT, tag)

        self._get_dependencies(tag)

        parent = ''
        for key, value in self._deps.iteritems():
            self._add_path(key, self.ASKAPROOT, value["path"],
                           libs=value["libs"], parent=parent)
            parent = value["path"]


    # Add a ASKAP repository path to the environment
    # This sets up LIBPATH and CPPPATH
    def _add_path(self, pkgname, root, tag, parent='', libs=None,
                 libdir=None, incdir=None, bindir=None,
                 pymodule=None):
        loc = None

        if tag.startswith("/"): # external package
            loc = tag
        else:                   # ASKAP package or 3rdParty library
            loc = os.path.join(root, tag)

        rloc = os.path.relpath(loc, self.ASKAPROOT)
        if not os.path.exists(loc):
            raise BuildError("Dependency directory '%s' does not exist (requested by %s)." % (rloc,parent))

        self._rootdirs += [loc]
        info = self._get_info(loc) # get optional package info
        idir = os.path.join(loc, self.INSTALL_SUBDIR) # actual install location.

        if not bindir: # add bin directory
            bindir = info["bindir"]

        if bindir: # None means disabled in info file
            pth = os.path.join(idir, bindir)
            if os.path.exists(pth):
                self._bindirs += [pth]

        if not incdir: # add include directory
            incdir = info["incdir"]

        if incdir: # None means disabled in info file
            pth = os.path.join(idir, incdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: incdir '%s' does not exist." % pth)
            else:
                self._incdirs += [pth]

        if not libdir: # add library directory
            libdir = info["libdir"]

        if libdir: # None means disabled in info file
            pth = os.path.join(idir, libdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: libdir '%s' does not exist." % pth)
            else:
                self._ldlibpath += os.path.pathsep+pth
                self._libdirs += [(pth, idir)]

        libs = libs or info["libs"]
        addlibs = True
        
        if isinstance(libs, list) and len(libs) == 0:
            addlibs = False

        libs = libs or pkgname

        if not isinstance(libs, list):
            libs = [libs]

        if addlibs: # only add lib if it's not a python module
            nlibs = []
            for lib in libs:
                instdir = idir
                if not glob.glob("{0}/lib{1}*".format(os.path.join(idir, 
                                                                   libdir),
                                                      lib)):
                    instdir = ""
                nlibs.append((lib, instdir))
            self._libs += nlibs
            libs = self._libs[:] # copy
            self._remove_duplicates(libs)
            self._libs = libs

        if info["defs"]: # add package defines
            self._cppflags += info["defs"]

        if info["env"]: # add environment variables
            self._env += info["env"]

        # check whether it is python, i.e. pymodule entry in package.info
        if not pymodule:
            pymodule = info["pymodule"]
        if pymodule:
            pth = os.path.join(idir, libdir, utils.get_site_dir())
            if self._pypath.find(pth) < 1:
                self._pypath = os.path.pathsep.join([pth, self._pypath])
        
        if info["jars"]:
            pth = os.path.join(idir, libdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: libdir '%s' does not exist." % pth)
            for jar in info["jars"]:
                jar = os.path.join(pth, jar)
                if jar not in self._jars:
                    self._jars.append(jar)
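
# Editor's note (an assumption, not from the build tools above): self._deps.toend(key)
# is not a method of collections.OrderedDict, so _deps presumably comes from a
# third-party ordered-dict variant. The same "move an already-known dependency to the
# end so the link order stays correct" behaviour can be sketched with a plain
# pop/re-insert; the package names below are placeholders only.
from collections import OrderedDict

def move_to_end(ordered, key):
    # Popping and re-inserting places the key after all existing entries.
    ordered[key] = ordered.pop(key)

deps = OrderedDict([("lapack", {"path": "3rdParty/lapack"}),
                    ("cfitsio", {"path": "3rdParty/cfitsio"})])
move_to_end(deps, "lapack")
assert list(deps.keys()) == ["cfitsio", "lapack"]
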
Exemple #21
0
r = con.store_result()
r = r.fetch_row(maxrows=0)
total = r[0][0]

query(query2)

r = con.store_result()
r = r.fetch_row(maxrows=0)

for i in range(0, 25):
    info[abc[i]] = r[0][i]

j = 0
l = 0

for i, k in info.iteritems():
    print(aminoacids(0)[i] + ", " + str(k*100/total)[0:4] + "%")
    l = l + int(k)
    j = j + (k*100/total)


print("%tot: " + str(j))
print("#TotRes: " + str(l))

query('SELECT COUNT(*) FROM ' + count_table)
r = con.store_result()
r = r.fetch_row(maxrows=0)
print("#TotSeq: " + str(r[0][0]))

print("En la versión de 2014_03, SwissProt contiene 542782 entradas de secuencia, que suman 193019802 aminoácidos =)")
print("ftp://ftp.uniprot.org/pub/databases/uniprot/previous_releases/release-2014_03/knowledgebase/")
Exemple #23
0
                seqFile.write('\t'.join(gwasId))
            else:
                logger.info(
                    "Not update ID: id file does not have correct entry for " +
                    str(key))
                seqFile.write('\t'.join(gwasId))
        else:
            gwasId = os.path.basename(fn).replace('.pileup', '')
            seqFile.write('\t'.join([gwasId, gwasId]))

        # write out ref, alt, qual
        totalDepth = 0
        siteExcluded = 0
        siteHasPileup = 0
        siteNoPileup = 0
        for k, v in colDict.iteritems():
            if k in excludePos:
                seqFile.write('\t0 0 0')
                logger.debug('Exclude\t%s\n' % k)
                siteExcluded += 1
                continue
            if k in res:
                ref, alt, qual = res[k]
                seqFile.write('\t%d %d %d' % (ref + alt, ref, qual))
                siteHasPileup += 1
                totalDepth += (ref + alt)
            else:
                #"no pile up at position"
                seqFile.write('\t0 0 0')  # last 0 mean quality zero
                siteNoPileup += 1
        seqFile.write('\n')
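
# Editor's sketch (hypothetical names, mirroring the fragment above): for every
# position of an ordered column dict the code writes "depth ref qual" columns, a zero
# triple for excluded positions or positions without pileup data, and accumulates the
# total depth. A condensed standalone version:
import sys
from collections import OrderedDict

def write_site_columns(out, col_dict, pileup, exclude):
    total_depth = 0
    for pos in col_dict:
        if pos in exclude or pos not in pileup:
            out.write('\t0 0 0')  # no usable data at this position
            continue
        ref, alt, qual = pileup[pos]
        out.write('\t%d %d %d' % (ref + alt, ref, qual))
        total_depth += ref + alt
    out.write('\n')
    return total_depth

cols = OrderedDict([(1, None), (2, None), (3, None)])
write_site_columns(sys.stdout, cols, {2: (5, 1, 30)}, set([3]))
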
Exemple #24
0
class Artifact(object):
    HASH_WHITELIST = Constants.ARTIFACT_HASH_WHITELIST
    MAX_DATA_DICT_DECIMALS = 5
    MAX_DATA_DICT_LENGTH = 10 ** MAX_DATA_DICT_DECIMALS
    META_ATTRS = [
        'additional_inputs',
        'binary_input',
        'binary_output',
        'created_by',
        'document_key',
        'ext',
        'final',
        'hashfunction',
        'initial',
        'logstream',
        'key',
        'name',
        'output_hash',
        'state',
        'stdout',
        'virtual'
    ]

    BINARY_EXTENSIONS = [
        '.docx',
        '.eot',
        '.epub',
        '.gif',
        '.gz',
        '.jpg',
        '.kch',
        '.odt',
        '.pdf',
        '.png',
        '.rtf',
        '.sqlite',
        '.sqlite3',
        '.swf',
        '.tgz',
        '.ttf',
        '.wav',
        '.woff',
        '.xls',
        '.zip'
    ]

    def __init__(self):
        if not hasattr(self.__class__, 'FILTERS'):
            self.__class__.FILTERS = dexy.introspect.filters(Constants.NULL_LOGGER)

        self._inputs = {}
        self.additional = None
        self.additional_inputs = []
        self.args = {}
        self.args['globals'] = {}
        self.artifacts_dir = 'artifacts' # TODO don't hard code
        self.batch_id = None
        self.batch_order = None
        self.binary_input = None
        self.binary_output = None
        self.controller_args = {}
        self.controller_args['globals'] = {}
        self.created_by = None
        self.ctime = None
        self.data_dict = OrderedDict()
        self.dexy_version = Version.VERSION
        self.dirty = False
        self.document_key = None
        self.elapsed = 0
        self.ext = None
        self.final = None
        self.finish_time = None
        self.hashfunction = 'md5'
        self.initial = None
        self.inode = None
        self.input_data_dict = OrderedDict()
        self.is_last = False
        self.key = None
        self.log = logging.getLogger()
        self.logstream = ""
        self.mtime = None
        self.name = None
        self.source = None
        self.start_time = None
        self.state = 'new'
        self.stdout = None
        self.virtual_docs = None

    def keys(self):
        return self.data_dict.keys()

    def may_have_kv_storage(self):
        return self.binary_output and (self.ext in dexy.helpers.KeyValueData.EXTENSIONS)

    def __getitem__(self, key):
        if not hasattr(self, "_storage") and self.binary_output and (self.ext in dexy.helpers.KeyValueData.EXTENSIONS):
            self.setup_kv_storage()

        if hasattr(self, "_storage"):
            if self._storage.mode == "write":
                # Change from write mode to read mode...
                self.setup_kv_storage()
            return self._storage.retrieve(key)
        elif self.data_dict.has_key(key):
            return self.data_dict[key]
        elif hasattr(self, key):
            return getattr(self, key)
        elif self.ext in dexy.helpers.KeyValueData.EXTENSIONS:
            self.setup_kv_storage()
            return self._storage.retrieve(key)
        else:
            raise dexy.commands.UserFeedback("Can't find key '%s' in %s" % (key, self.key))

    def __unicode__(self):
        """
        When d[key] is used without any attribute being accessed, we need to return the artifact's output text.

        Jinja calls the __unicode__ method so we override that.
        """
        return self.output_text()

    def is_complete(self):
        return str(self.state) == 'complete'

    @classmethod
    def retrieve(klass, hashstring, hashfunction='md5'):
        if not hasattr(klass, 'retrieved_artifacts'):
            klass.retrieved_artifacts = {}
        if klass.retrieved_artifacts.has_key(hashstring):
            return klass.retrieved_artifacts[hashstring]
        else:
            artifact = klass()
            artifact.hashstring = hashstring
            artifact.hashfunction = hashfunction
            artifact.load()
            klass.retrieved_artifacts[hashstring] = artifact
            return artifact

    def load(self):
        self.load_meta()
        self.load_input()
        if self.is_complete() and not self.is_loaded():
            self.load_output()

    def load_inputs(self):
        for a in self.inputs():
            a.load()

    def save(self):
        if self.is_abstract():
            pass # For testing.
        elif not self.hashstring:
            raise Exception("can't persist an artifact without a hashstring!")
        else:
            self.save_meta()
            if self.is_complete() and not self.is_output_cached():
                try:
                    self.save_output()
                except IOError as e:
                    print "An error occured while saving %s" % self.key
                    raise e

    def is_abstract(self):
        return not hasattr(self, 'save_meta')

    def filter_args(self):
        """
        Returns args specified in the .dexy file for this filter alias.
        """
        args = {}
        for a in self.filter_class.ALIASES:
            if self.args.has_key(a):
                try:
                    args.update(self.args[a])
                except ValueError as e:
                    if "dictionary update sequence element" in e.message:
                        raise dexy.commands.UserFeedback("You need to supply a dict to argument '%s', rather than the single value '%s'" % (a, self.args[a]))
                    else:
                        print self.args[a]
                        raise e
        return args

    def setup_initial(self):
        """
        Set up an initial artifact (the first artifact in a document's filter chain).
        """
        if self.args.has_key('binary'):
            self.binary_input = self.args['binary']
        else:
            self.binary_input = (self.doc.ext in self.BINARY_EXTENSIONS)

        self.binary_output = self.binary_input
        self.ext = self.doc.ext
        self.initial = True
        self.virtual = self.doc.virtual
        self.virtual_docs = self.doc.virtual_docs

        if self.args.has_key('final'):
            self.final = self.args['final']
        elif os.path.basename(self.name).startswith("_"):
            self.final = False

        if not self.doc.virtual:
            stat_info = os.stat(self.name)
            self.ctime = stat_info[stat.ST_CTIME]
            self.mtime = stat_info[stat.ST_MTIME]
            self.inode = stat_info[stat.ST_INO]

        self.set_data(self.doc.initial_artifact_data())

        # TODO remove?
        if not self.data_dict:
            raise Exception("no data dict!")
        elif len(self.data_dict) == 0:
            raise Exception("data dict has len 0!")

        self.state = 'complete'

    def setup_from_filter_class(self):
        # cache filter class source code so it only has to be calculated once
        filter_class_source_const = "SOURCE_CODE_%s" % self.filter_class.__name__
        if not hasattr(self.filter_class, filter_class_source_const):
            # get source code of this filter class + all parent filter classes.
            source = ""
            klass = self.filter_class

            # get source code from filter class and all parent classes
            while klass != dexy.dexy_filter.DexyFilter:
                source += inspect.getsource(klass)
                klass = klass.__base__

            # and then get source code of DexyFilter class
            source += inspect.getsource(dexy.dexy_filter.DexyFilter)

            filter_class_source_hash = self.compute_hash(source)
            setattr(self.filter_class, filter_class_source_const, filter_class_source_hash)
            assert filter_class_source_hash == getattr(self.filter_class, filter_class_source_const)
            self.log.debug("Source code hash for %s is %s" % (self.filter_class.__name__, filter_class_source_hash))

        if not hasattr(self.filter_class, 'VERSION'):
            filter_version = self.filter_class.version(self.log)
            self.filter_class.VERSION = filter_version

        self.filter_name = self.filter_class.__name__
        self.filter_source = getattr(self.filter_class, filter_class_source_const)
        self.filter_version = self.filter_class.VERSION

        if self.final is None:
            self.final = self.filter_class.FINAL

    def setup_from_previous_artifact(self, previous_artifact):
        for a in ['args', 'final', 'mtime', 'ctime', 'inode', 'virtual', 'virtual_docs']:
            setattr(self, a, getattr(previous_artifact, a))

        # Look for additional inputs in previous artifacts or previous
        # artifacts' inputs.
        for k, a in previous_artifact.inputs().iteritems():
            if a.additional and not k in self._inputs:
                self.add_input(k, a)
            elif not k in self._inputs and not a.virtual:
                # We should have all other inputs already. Validate this.
                raise Exception("Missing input %s" % k)

            for kk, aa in a.inputs().iteritems():
                if aa.additional and not kk in self._inputs:
                    self.add_input(kk, aa)

        self.binary_input = previous_artifact.binary_output
        self.input_data_dict = previous_artifact.data_dict
        self.input_ext = previous_artifact.ext
        self.previous_artifact_hashstring = previous_artifact.hashstring
        self.previous_artifact_filename = previous_artifact.filename()
        self.previous_artifact_filepath = previous_artifact.filepath()
        self.previous_canonical_filename = previous_artifact.canonical_filename(True)
        self.previous_long_canonical_filename = previous_artifact.long_canonical_filename()
        self.previous_websafe_key = previous_artifact.websafe_key()

        # The JSON output of previous artifact
        if not previous_artifact.binary_output:
            self.previous_cached_output_filepath = previous_artifact.cached_output_filepath()

        # Determine file extension of output
        if hasattr(self, 'next_filter_class'):
            next_inputs = self.next_filter_class.INPUT_EXTENSIONS
        else:
            next_inputs = None

        if self.filter_args().has_key('ext'):
            ext = self.filter_args()['ext']
            if not ext.startswith("."):
                ext = ".%s" % ext
            self.ext = ext
        else:
            self.ext = self.filter_class.output_file_extension(
                    previous_artifact.ext,
                    self.name,
                    next_inputs)

        self.binary_output = self.filter_class.BINARY
        if self.binary_output is None:
            self.set_binary_from_ext()

        self.state = 'setup'

    @classmethod
    def setup(klass, doc, artifact_key, filter_class = None, previous_artifact = None):
        """
        Create an Artifact instance and load all information needed to
        calculate its hashstring.
        """
        artifact = klass()
        artifact.key = artifact_key
        artifact.filter_class = filter_class
        artifact.is_last = (artifact.key == doc.key())

        # Add references for convenience
        artifact.artifacts_dir = doc.artifacts_dir
        artifact.controller_args = doc.controller.args
        artifact.hashfunction = doc.controller.args['hashfunction']
        artifact.db = doc.db
        artifact.doc = doc
        artifact.log = doc.log

        # These attributes are the same for all artifacts pertaining to a document
        artifact.args = doc.args
        artifact.batch_id = doc.batch_id
        artifact.document_key = doc.key()
        artifact.name = doc.name

        # Set batch order to next in sequence
        artifact.batch_order = artifact.db.next_batch_order(artifact.batch_id)

        next_filter_class = doc.next_filter_class()
        if next_filter_class:
            artifact.next_filter_name = next_filter_class.__name__
            artifact.next_filter_class = next_filter_class

        # Set inputs from original document inputs.
        artifact._inputs.update(artifact.doc.input_artifacts())
        if len(artifact.doc.input_artifacts().keys()) > 10:
            doc.log.debug("Setting inputs to include %s document inputs" % len(artifact.doc.input_artifacts()))
        elif len(artifact.doc.input_artifacts().keys()) > 0:
            doc.log.debug("Setting inputs to include inputs: %s" % ",".join(artifact.doc.input_artifacts().keys()))

        for k, a in artifact.doc.input_artifacts().iteritems():
            if a.additional and not k in artifact._inputs:
                doc.log.debug("Adding additional input %s" % k)
                artifact.add_input(k, a)

            for kk, aa in a.inputs().iteritems():
                if aa.additional and not kk in artifact._inputs:
                    doc.log.debug("Adding additional input %s" % kk)
                    artifact.add_input(kk, aa)

        if previous_artifact:
            artifact.setup_from_previous_artifact(previous_artifact)
            artifact.setup_from_filter_class()
        else:
            artifact.setup_initial()

        artifact.set_hashstring()

        return artifact

    def run(self):
        start = time.time()

        if self.controller_args['nocache'] or not self.is_complete():
            # We have to actually run things...
            if not self.filter_class:
                self.filter_class = dexy.introspect.get_filter_by_name(self.filter_name, self.doc.__class__.filter_list)

            # Set up instance of filter.
            filter_instance = self.filter_class()
            filter_instance.artifact = self
            filter_instance.log = self.log

            # Make sure previous artifact is loaded.
            if not self.binary_input and len(self.input_text()) == 0:
                f = open(self.previous_artifact_filepath, "rb")
                self.data_dict['1'] = f.read()
                f.close()

            try:
                filter_instance.process()
            except dexy.commands.UserFeedback as e:
                messages = []
                err_msg_args = (self.doc.key(), self.filter_alias, self.doc.step, len(self.doc.filters))
                messages.append("ERROR in %s (in filter '%s' - step %s of %s)" % err_msg_args)
                messages.append(e.message)

                for message in messages:
                    self.log.debug(message)

                messages.append("This exception information has been written to logs/dexy.log")
                messages.append("There may be more information in logs/dexy.log")
                if self.log.getEffectiveLevel() > logging.DEBUG:
                    messages.append("If you can't find clues in the log, try running again with -loglevel DEBUG")

                raise dexy.commands.UserFeedback("\n".join(messages))

            except dexy.commands.InternalDexyProblem as e:
                err_msg_args = (self.doc.key(), self.filter_alias, self.doc.step, len(self.doc.filters))
                sys.stderr.write("ERROR in %s (in filter '%s' - step %s of %s)\n" % err_msg_args)
                raise e
            except Exception as e:
                traceback.print_tb(sys.exc_info()[2])
                err_msg_args = (self.doc.key(), self.filter_alias, self.doc.step, len(self.doc.filters))
                sys.stderr.write("ERROR in %s (in filter '%s' - step %s of %s)\n" % err_msg_args)
                if e.message:
                    raise dexy.commands.InternalDexyProblem("error class: %s\nerror message: %s" % (e.__class__.__name__, e.message))
                else:
                    raise dexy.commands.InternalDexyProblem("error class: %s" % e.__class__.__name__)

            if self.data_dict and len(self.data_dict) > 0:
                pass

            elif self.is_canonical_output_cached:
                self.state = 'complete'
                self.save()

            else:
                raise Exception("data neither in memory nor on disk")

            self.logstream = self.doc.logstream.getvalue()
            self.state = 'complete'
            self.source = 'run'
            self.save()
        else:
            self.source = 'cache'
            self.log.debug("using cached artifact for %s" % self.key)

            # make sure additional artifacts are added to db
            for a in self.inputs().values():
                if a.additional and not a.key in self.db.extra_keys:
                    a.batch_id = self.batch_id
                    self.db.append_artifact(a)

        self.elapsed = time.time() - start
        self.db.update_artifact(self)

    def add_additional_artifact(self, key_with_ext, ext=None):
        if not ext:
            ext = os.path.splitext(key_with_ext)[1]
        new_artifact = self.__class__()
        new_artifact.key = key_with_ext
        if ext.startswith("."):
            new_artifact.ext = ext
        else:
            new_artifact.ext = ".%s" % ext
        new_artifact.final = True
        new_artifact.hashfunction = self.hashfunction
        new_artifact.additional = True
        new_artifact.set_binary_from_ext()
        new_artifact.artifacts_dir = self.artifacts_dir
        new_artifact.inode = self.hashstring
        new_artifact.created_by = self.key
        new_artifact.virtual = True
        new_artifact.name = key_with_ext.split("|")[0]

        # TODO this is duplicated in setup_from_previous_artifact, should reorganize
        for at in ['batch_id', 'document_key', 'mtime', 'ctime', 'virtual_docs']:
            val = getattr(self, at)
            setattr(new_artifact, at, val)

        new_artifact.set_hashstring()
        self.log.debug("new artifact %s hashstring %s" % (key_with_ext, new_artifact.hashstring))
        self.add_input(key_with_ext, new_artifact)
        self.db.append_artifact(new_artifact) # append to db because not part of doc.artifacts
        return new_artifact

    def add_input(self, key, artifact):
        self._inputs[key] = artifact
        self.additional_inputs.append(artifact.hashstring)

    def inputs(self):
        return self._inputs

    def set_binary_from_ext(self):
        # TODO list more binary extensions or find better way to do this
        if self.ext in self.BINARY_EXTENSIONS:
            self.binary_output = True
        else:
            self.binary_output = False

    def set_data(self, data):
        self.data_dict['1'] = data

    def set_data_from_artifact(self):
        f = codecs.open(self.filepath(), "r", encoding="utf-8")
        self.data_dict['1'] = f.read()

    def is_loaded(self):
        return hasattr(self, 'data_dict') and len(self.data_dict) > 0

    def compute_hash(self, text):
        unicode_text = None

        if type(text) == unicode:
            unicode_text = text
        elif type(text) in [dict, list]:
            unicode_text = json.dumps(text)
        elif self.binary_input:
            pass
        else:
            unicode_text = unicode(text, encoding="utf-8")

        if unicode_text:
            text = unicode_text.encode("utf-8")

        if self.hashfunction == 'md5':
            h = hashlib.md5(text).hexdigest()

        elif self.hashfunction == 'sha1':
            h = hashlib.sha1(text).hexdigest()

        elif self.hashfunction == 'sha224':
            h = hashlib.sha224(text).hexdigest()

        elif self.hashfunction == 'sha256':
            h = hashlib.sha256(text).hexdigest()

        elif self.hashfunction == 'sha384':
            h = hashlib.sha384(text).hexdigest()

        elif self.hashfunction == 'sha512':
            h = hashlib.sha512(text).hexdigest()

        elif self.hashfunction == 'crc32':
            h = str(zlib.crc32(text) & 0xffffffff)

        elif self.hashfunction == 'adler32':
            h = str(zlib.adler32(text) & 0xffffffff)

        else:
            raise Exception("unexpected hash function %s" % self.hashfunction)

        return h
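    # Added note (a minimal sketch, not part of the original dexy source): the
    # method above normalizes unicode / dict / list input to UTF-8 bytes and
    # then dispatches on the configured hash function name. The hashlib-backed
    # branches could equivalently be collapsed with hashlib.new:
    #
    #   import hashlib, zlib
    #
    #   def compute_hash(text, hashfunction='md5'):
    #       if isinstance(text, unicode):
    #           text = text.encode("utf-8")
    #       if hashfunction in ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'):
    #           return hashlib.new(hashfunction, text).hexdigest()
    #       elif hashfunction == 'crc32':
    #           return str(zlib.crc32(text) & 0xffffffff)
    #       elif hashfunction == 'adler32':
    #           return str(zlib.adler32(text) & 0xffffffff)
    #       raise Exception("unexpected hash function %s" % hashfunction)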

    def input_hashes(self):
        """
        Returns an OrderedDict of key, hashstring for each input artifact, sorted by key.
        """
        return OrderedDict((k, str(self.inputs()[k].hashstring)) for k in sorted(self.inputs()))

    def hash_dict(self):
        """
        Calculate and cache the elements used to compute the hashstring
        """
        if not hasattr(self.__class__, 'SOURCE_CODE'):
            artifact_class_source = inspect.getsource(self.__class__)
            artifact_py_source = inspect.getsource(Artifact)
            self.__class__.SOURCE_CODE = self.compute_hash(artifact_class_source + artifact_py_source)

        self.artifact_class_source = self.__class__.SOURCE_CODE

        if self.dirty:
            self.dirty_string = time.gmtime()

        hash_dict = OrderedDict()

        hash_dict['inputs'] = self.input_hashes()

        for k in self.HASH_WHITELIST:
            if self.__dict__.has_key(k):
                v = self.__dict__[k]
                if hasattr(v, 'items'):
                    hash_v = OrderedDict()
                    for k1 in sorted(v.keys()):
                        v1 = v[k1]
                        try:
                            if len(str(v1)) > 50:
                                raise Exception()
                            json.dumps(v1)
                            hash_v[str(k1)] = v1
                        except Exception:
                            # Use a hash if we will have problems saving to JSON
                            # or if the data is large (don't want to clutter up the DB,
                            # makes it harder to spot differences)
                            hash_v[str(k1)] = self.compute_hash(v1)
                else:
                    hash_v = str(v)
                hash_dict[str(k)] = hash_v
        return hash_dict
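    # Added illustration (hypothetical whitelist keys and values, not from the
    # original source): the ordered dict returned here is what set_hashstring()
    # stringifies and hashes, so it might look roughly like:
    #
    #   OrderedDict([
    #       ('inputs', OrderedDict([('other.txt|dexy', 'a1b2c3...')])),
    #       ('args', OrderedDict([('final', 'True')])),
    #       ('ext', '.html'),
    #   ])
    #
    # Argument values that are large or not JSON-serializable are replaced by
    # their hash so the stored hash_dict stays compact and comparable.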

    def set_hashstring(self):
        if hasattr(self, 'hashstring'):
            raise Exception("setting hashstring twice")

        hash_data = str(self.hash_dict())
        self.hashstring = self.compute_hash(hash_data)

        try:
            original_document_key = self.document_key
            if not self.is_loaded():
                self.load()
            self.document_key = original_document_key
        except AttributeError as e:
            if not self.is_abstract():
                raise e
        except IOError as e:
            self.save_meta()

    def convert_if_not_unicode(self, s):
        if type(s) == unicode:
            return s
        elif s == None:
            return u""
        else:
            try:
                ut = unicode(s, encoding="utf-8")
                return ut
            except Exception as e:
                print "error occurred trying to convert text to unicode in", self.key
                raise e


    def input_text(self):
        return u"".join([self.convert_if_not_unicode(v) for k, v in self.input_data_dict.items()])

    def output_text(self):
        return u"".join([self.convert_if_not_unicode(v) for k, v in self.data_dict.items()])

    def read_binary_output(self):
        self.binary_output = True
        self.load_output()
        self.binary_output = False
        return self.binary_data

    def output(self):
        if not self.is_complete():
            raise Exception("can't call output unless complete!")

        if self.binary_output:
            if not hasattr(self, 'binary_data'):
                self.load_output()
            return self.binary_data
        else:
            return self.output_text()

    def relative_refs(self, relative_to_file):
        """How to refer to this artifact, relative to another."""

        doc_dir = os.path.dirname(relative_to_file)
        return [
                os.path.relpath(self.key, doc_dir),
                os.path.relpath(self.long_canonical_filename(), doc_dir),
                "/%s" % self.key,
                "/%s" % self.long_canonical_filename()
        ]
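    # Added illustration (hypothetical key and extension, not from the original
    # source): for an artifact with key "assets/style.css|dexy" and ext ".css",
    # referenced from "docs/index.html", this would return roughly:
    #
    #   ['../assets/style.css|dexy',
    #    '../assets/style.css-dexy.css',
    #    '/assets/style.css|dexy',
    #    '/assets/style.css-dexy.css']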

    def use_canonical_filename(self):
        """Returns the canonical filename after saving contents under this name
        in the artifacts directory."""
        self.write_to_file(os.path.join(self.artifacts_dir,
                                        self.canonical_filename()))
        return self.canonical_filename()

    def write_to_file(self, filename):
        dirname = os.path.dirname(filename)
        if not os.path.exists(dirname) and not dirname == '':
            os.makedirs(dirname)
        shutil.copyfile(self.filepath(), filename)

    def work_filename(self):
        return "%s.work%s" % (self.hashstring, self.input_ext)

    def generate_workfile(self, work_filename = None):
        if not work_filename:
            work_filename = self.work_filename()
        work_path = os.path.join(self.artifacts_dir, work_filename)
        work_file = codecs.open(work_path, "w", encoding="utf-8")
        work_file.write(self.input_text())
        work_file.close()

    def temp_filename(self, ext):
        return "%s.work%s" % (self.hashstring, ext)

    def open_tempfile(self, ext):
        tempfile_path = os.path.join(self.artifacts_dir, self.temp_filename(ext))
        codecs.open(tempfile_path, "w", encoding="utf-8")

    def temp_dir(self):
        return os.path.join(self.artifacts_dir, self.hashstring)

    def create_temp_dir(self, populate=False):
        tempdir = self.temp_dir()
        shutil.rmtree(tempdir, ignore_errors=True)
        os.mkdir(tempdir)

        if populate:
            # write all inputs to this directory, under their canonical names
            for input_artifact in self._inputs.values():
                filename = os.path.join(tempdir, input_artifact.canonical_filename())
                if os.path.exists(input_artifact.filepath()):
                    input_artifact.write_to_file(filename)
                    self.log.debug("Populating temp dir for %s with %s" % (self.key, filename))
                else:
                    self.log.warn("Not populating temp dir for %s with file %s, file does not exist (yet)" % (self.key, filename))

            # write the workfile to this directory under its canonical name
            previous = self.previous_artifact_filepath
            workfile = os.path.join(tempdir, self.previous_canonical_filename)
            if not os.path.exists(os.path.dirname(workfile)):
                os.makedirs(os.path.dirname(workfile))
            self.log.debug("Copying %s to %s" % (previous, workfile))
            shutil.copyfile(previous, workfile)

    def alias(self):
        """
        Whether this artifact includes an alias.
        """
        aliases = [k for k in self.key.split("|") if k.startswith("-")]
        if len(aliases) > 0:
            return aliases[0]

    def canonical_dir(self, ignore_args = False):
        return os.path.dirname(self.name)

    def canonical_basename(self, ignore_args = False):
        return os.path.basename(self.canonical_filename(ignore_args))

    def canonical_filename(self, ignore_args = False):
        fn = os.path.splitext(self.key.split("|")[0])[0]

        if self.args.has_key('canonical-name') and not ignore_args:
            parent_dir = os.path.dirname(fn)
            return os.path.join(parent_dir, self.args['canonical-name'])
        elif self.args.has_key('postfix') and not ignore_args:
            return "%s%s%s" % (fn, self.ext, self.args['postfix'])
        elif self.alias():
            return "%s%s%s" % (fn, self.alias(), self.ext)
        else:
            return "%s%s" % (fn, self.ext)

    def long_canonical_filename(self):
        if not "|" in self.key:
            return self.key.replace("|", "-")
        else:
            return "%s%s" % (self.key.replace("|", "-"), self.ext)

    def websafe_key(self):
        return self.long_canonical_filename().replace("/", "--")

    def web_safe_document_key(self):
        # duplicate, remove this alias
        return self.websafe_key()

    def filename(self):
        """
        The filename where artifact content is stored, based on the hashstring.
        """
        if not hasattr(self, 'ext'):
            raise Exception("artifact %s has no ext" % self.key)
        return "%s%s" % (self.hashstring, self.ext)

    def filepath(self):
        """
        Full path (including artifacts dir location) to location where artifact content is stored.
        """
        return os.path.join(self.artifacts_dir, self.filename())

    def abs_filepath(self):
        return os.path.abspath(self.filepath())

    def breadcrumbs(self):
        """A list of parent dirs, plus the filename if it's not 'index.html'."""
        parent_dirs = os.path.dirname(self.canonical_filename()).split("/")

        if self.canonical_basename() == "index.html":
            result = parent_dirs
        else:
            # note: list.append() returns None, so build a new list instead
            result = parent_dirs + [self.canonical_basename()]

        if not result:
            result = []

        return result

    def titleized_name(self):
        if self.canonical_basename() == "index.html":
            return self.breadcrumbs()[-1].replace("-"," ").title()
        else:
            return os.path.splitext(self.canonical_basename())[0].replace("-"," ").title()

    def unique_key(self):
        return "%s:%s:%s" % (self.batch_id, self.document_key, self.key)

    def websafe_unique_key(self):
        return self.unique_key().replace("/", "--")

    def url(self):
        # TODO test for final
        return "/%s" % self.canonical_filename()

    def hyperlink(self, link_text = None):
        # TODO test for final
        if not link_text:
            link_text = self.canonical_basename()

        return """<a href="%s">%s</a>""" % (self.url(), link_text)

    def iframe(self, link_text = None, width = "600px", height = "300px"):
        # TODO test for final
        args = {
                'url' : self.url(),
                'hyperlink' : self.hyperlink(link_text),
                'width' : width,
                'height' : height
        }

        return """
<iframe src="%(url)s" width="%(width)s" height="%(height)s" style="border: thin solid gray;">
%(hyperlink)s
</iframe>
        """ % args

    def img(self):
        # TODO test for final
        return """<img src="/%s" alt="Image generated by dexy %s" />""" % (self.canonical_filename(), self.key)

    def relpath(self, artifact_key):
        """
        Returns relative path from self to other artifact key, e.g. for linking to CSS relatively
        """
        artifact = self.inputs()[artifact_key]
        return os.path.join(self.relative_path_to_input(artifact), artifact.canonical_basename())

    def has_sections(self):
        return (self.data_dict.keys() != ['1'])

    def relative_path_to_input(self, input_artifact):
        my_dir = os.path.dirname(self.name)
        input_dir = os.path.dirname(input_artifact.name)
        self.log.debug("Calculating relative path between %s and %s" % (self.name, input_artifact.name))

        if not my_dir:
            my_dir = "."
        if not input_dir:
            input_dir = "."

        if my_dir == input_dir:
            relpath = ""
        else:
            relpath = os.path.relpath(input_dir, my_dir)
        return relpath

    def relative_key_for_input(self, input_artifact):
        relpath = self.relative_path_to_input(input_artifact)
        return os.path.join(relpath, os.path.basename(input_artifact.key))

    def convert_numbered_dict_to_ordered_dict(self, numbered_dict):
        ordered_dict = OrderedDict()
        for x in sorted(numbered_dict.keys()):
            k = x.split(":", 1)[1]
            ordered_dict[k] = numbered_dict[x]
        return ordered_dict

    def convert_data_dict_to_numbered_dict(self):
        if len(self.data_dict) >= self.MAX_DATA_DICT_LENGTH:
            exception_msg = """Your data dict has %s items, which is greater than the arbitrary limit of %s items.
            You can increase this limit by changing MAX_DATA_DICT_DECIMALS."""
            raise Exception(exception_msg % (len(self.data_dict), self.MAX_DATA_DICT_LENGTH))

        data_dict = {}
        i = -1
        for k, v in self.data_dict.iteritems():
            i += 1
            fmt = "%%0%sd:%%s" % self.MAX_DATA_DICT_DECIMALS
            data_dict[fmt % (i, k)] = v
        return data_dict
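    # Added illustration (assuming MAX_DATA_DICT_DECIMALS is 5, which is not
    # stated in this excerpt): a data dict such as
    # OrderedDict([('intro', '...'), ('body', '...')]) becomes
    # {'00000:intro': '...', '00001:body': '...'}, and
    # convert_numbered_dict_to_ordered_dict() restores the original section
    # order by sorting on the zero-padded prefix before splitting it off.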

    def storage(self, reset=False):
        if not hasattr(self, "_storage") or reset:
            # Assume we want KV storage
            self.setup_kv_storage()
        return self._storage

    def key_prefixes(self):
        return sorted(set(":".join(k.split(":")[:-1]) for k in self.storage().keys()))

    def kv_storage(self):
        return self.storage()

    def row_storage(self):
        if not hasattr(self, "_storage"):
            self.setup_row()
        return self._storage

    def setup_kv_storage(self):
        try:
            self._storage = dexy.helpers.KeyValueData(self.filepath())
        except ValueError as e:
            raise dexy.commands.UserFeedback("Can't get key-value data from %s for %s: %s" % self.filepath(), self.key, e.message)

    def setup_row_storage(self):
        self._storage = dexy.helpers.RowData(self.filepath())
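# Added standalone sketch (not the original dexy implementation): the caching
# idea in this example is to collect hash-relevant attributes into an
# OrderedDict, stringify it, and hash the result so identical inputs always
# map to the same cached artifact file. The attribute names below are made up
# for illustration.

import hashlib
from collections import OrderedDict

def example_hashstring(inputs, **attributes):
    hash_dict = OrderedDict()
    # input hashes are keyed and sorted so ordering differences don't change the hash
    hash_dict['inputs'] = OrderedDict((k, inputs[k]) for k in sorted(inputs))
    for key in sorted(attributes):
        hash_dict[key] = str(attributes[key])
    return hashlib.md5(str(hash_dict)).hexdigest()

print example_hashstring({'other.txt|dexy': 'a1b2c3'}, ext='.html', filter_version='1.0')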
Exemple #25
0
class SectionsFileParser(foundations.io.File):
	"""
	This class provides methods to parse sections file format files;
	an alternative configuration file parser is available directly in Python: :class:`ConfigParser.ConfigParser`.

	The parser given by this class has some major differences with Python :class:`ConfigParser.ConfigParser`:

		- | Sections and attributes are stored in their order of appearance by default
			( using Python :class:`collections.OrderedDict` ).
		- | A default section ( **_defaults** ) will store orphan attributes
			( attributes appearing before any declared section ).
		- File comments are stored inside the :obj:`SectionsFileParser.comments` class property.
		- | Sections, attributes and values are stripped of whitespace by default
			but can also be stored with their leading and trailing whitespace.
		- | Values are stripped of quotation markers by default
			but can also be stored with their leading and trailing quotation markers.
		- Attributes are namespaced by default, allowing sections to be merged without key collisions.

	"""

	def __init__(self,
				file=None,
				splitters=("=", ":"),
				namespaceSplitter="|",
				commentLimiters=(";", "#"),
				commentMarker="#",
				quotationMarkers=("\"", "'", "`"),
				rawSectionContentIdentifier="_rawSectionContent",
				defaultsSection="_defaults"):
		"""
		This method initializes the class.
		
		Usage::
		
			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse(stripComments=False)
			True
			>>> sectionsFileParser.sections.keys()
			['Section A', 'Section B']
			>>> sectionsFileParser.comments 
			OrderedDict([('Section A|#0', {'content': 'Comment.', 'id': 0})])

		:param file: Current file path. ( String )
		:param splitters: Splitter characters. ( Tuple / List )
		:param namespaceSplitter: Namespace splitter character. ( String )
		:param commentLimiters: Comment limiters characters. ( Tuple / List )
		:param commentMarker: Character used to prefix extracted comment identifiers. ( String )
		:param quotationMarkers: Quotation markers characters. ( Tuple / List )
		:param rawSectionContentIdentifier: Raw section content identifier. ( String )
		:param defaultsSection: Default section name. ( String )
		"""

		LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

		foundations.io.File.__init__(self, file)

		# --- Setting class attributes. ---
		self.__splitters = None
		self.splitters = splitters
		self.__namespaceSplitter = None
		self.namespaceSplitter = namespaceSplitter
		self.__commentLimiters = None
		self.commentLimiters = commentLimiters
		self.__commentMarker = None
		self.commentMarker = commentMarker
		self.__quotationMarkers = None
		self.quotationMarkers = quotationMarkers
		self.__rawSectionContentIdentifier = None
		self.rawSectionContentIdentifier = rawSectionContentIdentifier
		self.__defaultsSection = None
		self.defaultsSection = defaultsSection

		self.__sections = None
		self.__comments = None
		self.__parsingErrors = None

	#******************************************************************************************************************
	#***	Attributes properties.
	#******************************************************************************************************************
	@property
	def splitters(self):
		"""
		This method is the property for **self.__splitters** attribute.

		:return: self.__splitters. ( Tuple / List )
		"""

		return self.__splitters

	@splitters.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def splitters(self, value):
		"""
		This method is the setter method for **self.__splitters** attribute.

		:param value: Attribute value. ( Tuple / List )
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
			"splitters", value)
			for element in value:
				assert type(element) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"splitters", element)
				assert len(element) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("splitters", element)
				assert not re.search(r"\w", element), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
				"splitter", element)
		self.__splitters = value

	@splitters.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def splitters(self):
		"""
		This method is the deleter method for **self.__splitters** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "splitters"))

	@property
	def namespaceSplitter(self):
		"""
		This method is the property for **self.__namespaceSplitter** attribute.

		:return: self.__namespaceSplitter. ( String )
		"""

		return self.__namespaceSplitter

	@namespaceSplitter.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def namespaceSplitter(self, value):
		"""
		This method is the setter method for **self.__namespaceSplitter** attribute.

		:param value: Attribute value. ( String )
		"""

		if value is not None:
			assert type(value) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
			"namespaceSplitter", value)
			assert len(value) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("namespaceSplitter", value)
			assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
			"namespaceSplitter", value)
		self.__namespaceSplitter = value

	@namespaceSplitter.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def namespaceSplitter(self):
		"""
		This method is the deleter method for **self.__namespaceSplitter** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "namespaceSplitter"))

	@property
	def commentLimiters(self):
		"""
		This method is the property for **self.__commentLimiters** attribute.

		:return: self.__commentLimiters. ( Tuple / List )
		"""

		return self.__commentLimiters

	@commentLimiters.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def commentLimiters(self, value):
		"""
		This method is the setter method for **self.__commentLimiters** attribute.

		:param value: Attribute value. ( Tuple / List )
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
			"commentLimiters", value)
			for element in value:
				assert type(element) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"commentLimiters", element)
		self.__commentLimiters = value

	@commentLimiters.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def commentLimiters(self):
		"""
		This method is the deleter method for **self.__commentLimiters** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "commentLimiters"))

	@property
	def commentMarker(self):
		"""
		This method is the property for **self.__commentMarker** attribute.

		:return: self.__commentMarker. ( String )
		"""

		return self.__commentMarker

	@commentMarker.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def commentMarker(self, value):
		"""
		This method is the setter method for **self.__commentMarker** attribute.

		:param value: Attribute value. ( String )
		"""

		if value is not None:
			assert type(value) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
			"commentMarker", value)
			assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
			"commentMarker", value)
		self.__commentMarker = value

	@commentMarker.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def commentMarker(self):
		"""
		This method is the deleter method for **self.__commentMarker** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "commentMarker"))

	@property
	def quotationMarkers(self):
		"""
		This method is the property for **self.__quotationMarkers** attribute.

		:return: self.__quotationMarkers. ( Tuple / List )
		"""

		return self.__quotationMarkers

	@quotationMarkers.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def quotationMarkers(self, value):
		"""
		This method is the setter method for **self.__quotationMarkers** attribute.

		:param value: Attribute value. ( Tuple / List )
		"""

		if value is not None:
			assert type(value) in (tuple, list), "'{0}' attribute: '{1}' type is not 'tuple' or 'list'!".format(
			"quotationMarkers", value)
			for element in value:
				assert type(element) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"quotationMarkers", element)
				assert len(element) == 1, "'{0}' attribute: '{1}' has multiple characters!".format("quotationMarkers", element)
				assert not re.search(r"\w", element), "'{0}' attribute: '{1}' is an alphanumeric character!".format(
				"quotationMarkers", element)
		self.__quotationMarkers = value

	@quotationMarkers.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def quotationMarkers(self):
		"""
		This method is the deleter method for **self.__quotationMarkers** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "quotationMarkers"))

	@property
	def rawSectionContentIdentifier(self):
		"""
		This method is the property for **self.__rawSectionContentIdentifier** attribute.

		:return: self.__rawSectionContentIdentifier. ( String )
		"""

		return self.__rawSectionContentIdentifier

	@rawSectionContentIdentifier.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def rawSectionContentIdentifier(self, value):
		"""
		This method is the setter method for **self.__rawSectionContentIdentifier** attribute.

		:param value: Attribute value. ( String )
		"""

		if value is not None:
			assert type(value) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
			"rawSectionContentIdentifier", value)
		self.__rawSectionContentIdentifier = value

	@rawSectionContentIdentifier.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def rawSectionContentIdentifier(self):
		"""
		This method is the deleter method for **self.__rawSectionContentIdentifier** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "rawSectionContentIdentifier"))

	@property
	def defaultsSection(self):
		"""
		This method is the property for **self.__defaultsSection** attribute.

		:return: self.__defaultsSection. ( String )
		"""

		return self.__defaultsSection

	@defaultsSection.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def defaultsSection(self, value):
		"""
		This method is the setter method for **self.__defaultsSection** attribute.

		:param value: Attribute value. ( String )
		"""

		if value is not None:
			assert type(value) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
			"defaultsSection", value)
		self.__defaultsSection = value

	@defaultsSection.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def defaultsSection(self):
		"""
		This method is the deleter method for **self.__defaultsSection** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "defaultsSection"))

	@property
	def sections(self):
		"""
		This method is the property for **self.__sections** attribute.

		:return: self.__sections. ( OrderedDict / Dictionary )
		"""

		return self.__sections

	@sections.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def sections(self, value):
		"""
		This method is the setter method for **self.__sections** attribute.

		:param value: Attribute value. ( OrderedDict / Dictionary )
		"""

		if value is not None:
			assert type(value) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
			'OrderedDict' or 'dict'!".format("sections", value)
			for key, element in value.iteritems():
				assert type(key) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"sections", key)
				assert type(element) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
				'OrderedDict' or 'dict'!".format("sections", key)
		self.__sections = value

	@sections.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def sections(self):
		"""
		This method is the deleter method for **self.__sections** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "sections"))

	@property
	def comments(self):
		"""
		This method is the property for **self.__comments** attribute.

		:return: self.__comments. ( OrderedDict / Dictionary )
		"""

		return self.__comments

	@comments.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def comments(self, value):
		"""
		This method is the setter method for **self.__comments** attribute.

		:param value: Attribute value. ( OrderedDict / Dictionary )
		"""

		if value is not None:
			assert type(value) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
			'OrderedDict' or 'dict'!".format("comments", value)
			for key, element in value.iteritems():
				assert type(key) in (str, unicode), "'{0}' attribute: '{1}' type is not 'str' or 'unicode'!".format(
				"comments", key)
				assert type(element) in (OrderedDict, dict), "'{0}' attribute: '{1}' type is not \
				'OrderedDict' or 'dict'!".format("comments", key)
		self.__comments = value

	@comments.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def comments(self):
		"""
		This method is the deleter method for **self.__comments** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "comments"))

	@property
	def parsingErrors(self):
		"""
		This method is the property for **self.__parsingErrors** attribute.

		:return: self.__parsingErrors. ( List )
		"""

		return self.__parsingErrors

	@parsingErrors.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def parsingErrors(self, value):
		"""
		This method is the setter method for **self.__parsingErrors** attribute.

		:param value: Attribute value. ( List )
		"""

		if value is not None:
			assert type(value) is list, "'{0}' attribute: '{1}' type is not 'list'!".format("parsingErrors", value)
			for element in value:
				assert issubclass(element.__class__, foundations.exceptions.AbstractParsingError), \
				"'{0}' attribute: '{1}' is not a '{2}' subclass!".format(
				"parsingErrors", element, foundations.exceptions.AbstractParsingError.__class__.__name__)
		self.__parsingErrors = value

	@parsingErrors.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def parsingErrors(self):
		"""
		This method is the deleter method for **self.__parsingErrors** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
		"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "parsingErrors"))

	#******************************************************************************************************************
	#***	Class methods.
	#******************************************************************************************************************
	@foundations.exceptions.handleExceptions(foundations.exceptions.FileStructureParsingError)
	def parse(self,
			orderedDictionary=True,
			rawSections=None,
			namespaces=True,
			stripComments=True,
			stripWhitespaces=True,
			stripQuotationMarkers=True,
			raiseParsingErrors=True):
		"""
		This method processes the file content and extracts the sections / attributes
			as nested :class:`collections.OrderedDict` dictionaries or regular dictionaries.

		Usage::

			>>> content = ["; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse(stripComments=False)
			True
			>>> sectionsFileParser.sections.keys()
			['_defaults']
			>>> sectionsFileParser.sections["_defaults"].values()
			['Value A', 'Value B']
			>>> sectionsFileParser.parse(stripQuotationMarkers=False)
			True
			>>> sectionsFileParser.sections["_defaults"].values()
			['"Value A"', '"Value B"']
			>>> sectionsFileParser.comments 
			OrderedDict([('_defaults|#0', {'content': 'Comment.', 'id': 0})])
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.sections["_defaults"]
			OrderedDict([('_defaults|Attribute 1', 'Value A'), ('_defaults|Attribute 2', 'Value B')])
			>>> sectionsFileParser.parse(namespaces=False)
			True
			>>> sectionsFileParser.sections["_defaults"]
			OrderedDict([('Attribute 1', 'Value A'), ('Attribute 2', 'Value B')])

		:param orderedDictionary: SectionsFileParser data is stored
			in :class:`collections.OrderedDict` dictionaries. ( Boolean )
		:param rawSections: Ignored raw sections. ( Tuple / List )
		:param namespaces: Attributes and comments are namespaced. ( Boolean )
		:param stripComments: Comments are stripped. ( Boolean )
		:param stripWhitespaces: Whitespaces are stripped. ( Boolean )
		:param stripQuotationMarkers: Attributes values quotation markers are stripped. ( Boolean )
		:param raiseParsingErrors: Raise parsing errors. ( Boolean )
		:return: Method success. ( Boolean )
		"""

		LOGGER.debug("> Reading sections from: '{0}'.".format(self.path))

		if not self.content:
			return False

		if not orderedDictionary:
			self.__sections = {}
			self.__comments = {}
			attributes = {}
		else:
			self.__sections = OrderedDict()
			self.__comments = OrderedDict()
			attributes = OrderedDict()
		section = self.__defaultsSection
		rawSections = rawSections or []
		self.__parsingErrors = []

		commentId = 0
		for i, line in enumerate(self.content):
			# Comments matching.
			search = re.search(r"^\s*[{0}](?P<comment>.+)$".format("".join(self.__commentLimiters)), line)
			if search:
				if not stripComments:
					comment = namespaces and foundations.namespace.setNamespace(section, "{0}{1}".format(
							self.__commentMarker, commentId), self.__namespaceSplitter) or \
							"{0}{1}".format(self.__commentMarker, commentId)
					self.__comments[comment] = {"id" : commentId, "content" : stripWhitespaces and \
												search.group("comment").strip() or search.group("comment")}
					commentId += 1
				continue

			# Sections matching.
			search = re.search(r"^\s*\[(?P<section>.+)\]\s*$", line)
			if search:
				section = stripWhitespaces and search.group("section").strip() or search.group("section")
				if not orderedDictionary:
					attributes = {}
				else:
					attributes = OrderedDict()
				rawContent = []
				continue

			if section in rawSections:
				rawContent.append(line)
				attributes[self.__rawSectionContentIdentifier] = rawContent
			else:
				# Empty line matching.
				search = re.search(r"^\s*$", line)
				if search:
					continue

				# Attributes matching.
				search = re.search(r"^(?P<attribute>.+?)[{0}](?P<value>.+)$".format("".join(self.__splitters)), line)
				if search:
					attribute = stripWhitespaces and search.group("attribute").strip() or search.group("attribute")
					attribute = namespaces and foundations.namespace.setNamespace(section,
																				attribute,
																				self.__namespaceSplitter) or attribute
					value = stripWhitespaces and search.group("value").strip() or search.group("value")
					attributes[attribute] = stripQuotationMarkers and value.strip("".join(self.__quotationMarkers)) or value
				else:
					self.__parsingErrors.append(foundations.exceptions.AttributeStructureParsingError(
					"Attribute structure is invalid: {0}".format(line), i + 1))

			self.__sections[section] = attributes

		LOGGER.debug("> Sections: '{0}'.".format(self.__sections))
		LOGGER.debug("> '{0}' file parsing done!".format(self.path))

		if self.__parsingErrors and raiseParsingErrors:
			raise foundations.exceptions.FileStructureParsingError(
			"{0} | '{1}' structure is invalid, parsing exceptions occured!".format(self.__class__.__name__, self.path))

		return True

	def sectionExists(self, section):
		"""
		This method checks if given section exists.
		
		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.sectionExists("Section A")
			True
			>>> sectionsFileParser.sectionExists("Section C")
			False

		:param section: Section to check existence. ( String )
		:return: Section existence. ( Boolean )
		"""

		if not self.__sections:
			return False

		if section in self.__sections:
			LOGGER.debug("> '{0}' section exists in '{1}'.".format(section, self))
			return True
		else:
			LOGGER.debug("> '{0}' section doesn't exists in '{1}'.".format(section, self))
			return False

	def attributeExists(self, attribute, section):
		"""
		This method checks if given attribute exists.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.attributeExists("Attribute 1", "Section A")
			True
			>>> sectionsFileParser.attributeExists("Attribute 2", "Section A")
			False

		:param attribute: Attribute to check existence. ( String )
		:param section: Section to search attribute into. ( String )
		:return: Attribute existence. ( Boolean )
		"""

		if not self.__sections:
			return False

		if foundations.namespace.removeNamespace(attribute, rootOnly=True) in self.getAttributes(section,
																					orderedDictionary=True,
																					stripNamespaces=True):
			LOGGER.debug("> '{0}' attribute exists in '{1}' section.".format(attribute, section))
			return True
		else:
			LOGGER.debug("> '{0}' attribute doesn't exists in '{1}' section.".format(attribute, section))
			return False

	def getAttributes(self, section, orderedDictionary=True, stripNamespaces=False):
		"""
		This method returns given section attributes.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.getAttributes("Section A")
			OrderedDict([('Section A|Attribute 1', 'Value A')])
			>>> sectionsFileParser.getAttributes("Section A", orderedDictionary=False)
			{'Section A|Attribute 1': 'Value A'}
			>>> sectionsFileParser.getAttributes("Section A", stripNamespaces=True)
			OrderedDict([('Attribute 1', 'Value A')])

		:param section: Section containing the requested attributes. ( String )
		:param orderedDictionary: Use an :class:`collections.OrderedDict` dictionary to store the attributes. ( Boolean )
		:param stripNamespaces: Strip namespaces while retrieving attributes. ( Boolean )
		:return: Attributes. ( OrderedDict / Dictionary )
		"""

		LOGGER.debug("> Getting section '{0}' attributes.".format(section))
		dictionary = orderedDictionary and OrderedDict or dict
		attributes = dictionary()
		if not self.sectionExists(section):
			return attributes

		if stripNamespaces:
			for attribute, value in self.__sections[section].iteritems():
				attributes[foundations.namespace.removeNamespace(attribute, rootOnly=True)] = value
		else:
			attributes.update(self.__sections[section])
		LOGGER.debug("> Attributes: '{0}'.".format(attributes))
		return attributes

	def getAllAttributes(self, orderedDictionary=True):
		"""
		This method returns all sections attributes.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.getAllAttributes()
			OrderedDict([('Section A|Attribute 1', 'Value A'), ('Section B|Attribute 2', 'Value B')])
			>>> sectionsFileParser.getAllAttributes(orderedDictionary=False)
			{'Section B|Attribute 2': 'Value B', 'Section A|Attribute 1': 'Value A'}

		:param orderedDictionary: Use an :class:`collections.OrderedDict` dictionary to store the attributes. ( Boolean )
		:return: All sections / files attributes. ( OrderedDict / Dictionary )
		"""

		dictionary = orderedDictionary and OrderedDict or dict
		allAttributes = dictionary()
		if not self.__sections:
			return allAttributes

		for attributes in self.__sections.itervalues():
			for attribute, value in attributes.iteritems():
				allAttributes[attribute] = value
		return allAttributes

	def getValue(self, attribute, section, encode=False, default=str()):
		"""
		This method returns requested attribute value.

		Usage::

			>>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \
"[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"]
			>>> sectionsFileParser = SectionsFileParser()
			>>> sectionsFileParser.content = content
			>>> sectionsFileParser.parse()
			True
			>>> sectionsFileParser.getValue("Attribute 1", "Section A")
			Value A

		:param attribute: Attribute name. ( String )
		:param section: Section containing the searched attribute. ( String )
		:param encode: Encode value to unicode. ( Boolean )
		:param default: Default return value. ( Object )
		:return: Attribute value. ( String )
		"""

		if not self.__sections:
			return default

		if not self.attributeExists(attribute, section):
			return default

		if attribute in self.__sections[section]:
			value = self.__sections[section][attribute]
		elif foundations.namespace.setNamespace(section, attribute) in self.__sections[section]:
			value = self.__sections[section][foundations.namespace.setNamespace(section, attribute)]
		LOGGER.debug("> Attribute: '{0}', value: '{1}'.".format(attribute, value))
		value = foundations.strings.encode(value) if encode else value
		return value

	def write(self,
			namespaces=False,
			splitter="=",
			commentLimiter=(";"),
			spacesAroundSplitter=True,
			spaceAfterCommentLimiter=True):
		"""
		This method writes defined file using :obj:`SectionsFileParser.sections` and
			:obj:`SectionsFileParser.comments` class properties content.

		Usage::

			>>> sections = {"Section A": {"Section A|Attribute 1": "Value A"}, \
"Section B": {"Section B|Attribute 2": "Value B"}}
			>>> sectionsFileParser = SectionsFileParser("SectionsFile.rc")
			>>> sectionsFileParser.sections = sections
			>>> sectionsFileParser.write()
			True
			>>> sectionsFileParser.read()
			True
			>>> print sectionsFileParser.content[0:5]
			['[Section A]\\n', 'Attribute 1 = Value A\\n', '\\n', '[Section B]\\n', 'Attribute 2 = Value B\\n', '\\n']

		:param namespaces: Attributes are namespaced. ( Boolean )
		:param splitter: Splitter character. ( String )
		:param commentLimiter: Comment limiter character. ( String )
		:param spacesAroundSplitter: Spaces around attributes and value splitters. ( Boolean )
		:param spaceAfterCommentLimiter: Space after comments limiter. ( Boolean )
		:return: Method success. ( Boolean )
		"""

		if not self.__sections:
			return False

		LOGGER.debug("> Setting '{0}' file content.".format(self.path))
		attributeTemplate = spacesAroundSplitter and "{{0}} {0} {{1}}\n".format(splitter) or \
							"{{0}}{0}{{1}}\n".format(splitter)
		attributeTemplate = foundations.strings.replace(attributeTemplate, {"{{" : "{", "}}" : "}"})
		commentTemplate = spaceAfterCommentLimiter and "{0} {{0}}\n".format(commentLimiter) or \
							"{0}{{0}}\n".format(commentLimiter)
		if self.__defaultsSection in self.__sections:
			LOGGER.debug("> Appending '{0}' default section.".format(self.__defaultsSection))
			if self.__comments:
				for comment, value in self.__comments.iteritems():
					if self.__defaultsSection in comment:
						value = value["content"] or ""
						LOGGER.debug("> Appending '{0}' comment with '{1}' value.".format(comment, value))
						self.content.append(commentTemplate.format(value))
			for attribute, value in self.__sections[self.__defaultsSection].iteritems():
				attribute = namespaces and attribute or foundations.namespace.removeNamespace(attribute,
																							self.__namespaceSplitter,
																							rootOnly=True)
				value = value or ""
				LOGGER.debug("> Appending '{0}' attribute with '{1}' value.".format(attribute, value))
				self.content.append(attributeTemplate.format(attribute, value))
			self.content.append("\n")

		for i, section in enumerate(self.__sections):
			LOGGER.debug("> Appending '{0}' section.".format(section))
			self.content.append("[{0}]\n".format(section))
			if self.__comments:
				for comment, value in self.__comments.iteritems():
					if section in comment:
						value = value["content"] or ""
						LOGGER.debug("> Appending '{0}' comment with '{1}' value.".format(comment, value))
						self.content.append(commentTemplate.format(value))
			for attribute, value in self.__sections[section].iteritems():
				if foundations.namespace.removeNamespace(attribute) == self.__rawSectionContentIdentifier:
					LOGGER.debug("> Appending '{0}' raw section content.".format(section))
					for line in value:
						self.content.append(line)
				else:
					LOGGER.debug("> Appending '{0}' section.".format(section))
					attribute = namespaces and attribute or foundations.namespace.removeNamespace(attribute,
																								self.__namespaceSplitter,
																								rootOnly=True)
					value = value or ""
					LOGGER.debug("> Appending '{0}' attribute with '{1}' value.".format(attribute, value))
					self.content.append(attributeTemplate.format(attribute, value))
			if i != len(self.__sections) - 1:
				self.content.append("\n")
		foundations.io.File.write(self)
		return True
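# Added standalone sketch (not part of the foundations package): the parser
# above keeps sections and attributes in their order of appearance by nesting
# OrderedDict instances and namespacing attribute names with their section.
# A heavily simplified, runnable version of that idea:

import re
from collections import OrderedDict

def parse_sections(lines, namespace_splitter="|"):
    sections = OrderedDict()
    section = "_defaults"
    for line in lines:
        # skip comments and blank lines
        if re.match(r"^\s*[;#]", line) or not line.strip():
            continue
        # section headers such as "[Section A]"
        match = re.match(r"^\s*\[(?P<section>.+)\]\s*$", line)
        if match:
            section = match.group("section").strip()
            continue
        # "attribute = value" or "attribute : value" pairs
        match = re.match(r"^(?P<attribute>.+?)[=:](?P<value>.+)$", line)
        if match:
            key = "{0}{1}{2}".format(section, namespace_splitter, match.group("attribute").strip())
            value = match.group("value").strip().strip("\"'`")
            sections.setdefault(section, OrderedDict())[key] = value
    return sections

print parse_sections(["[Section A]\n", "; Comment.\n", "Attribute 1 = \"Value A\"\n"])
# OrderedDict([('Section A', OrderedDict([('Section A|Attribute 1', 'Value A')]))])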
Exemple #26
0
class Cache(object):
  '''
    Cache to hold cached contents
  '''
  index_filename = "index.pickle"
  # active requests are not counted.

  def __init__(self, path, entry_limit):
    self.path = path
    self.on_memory_entry_limit = entry_limit
    self.on_memory = OrderedDict() #FastAVLTree()
    self.index = {}
    self.load_index()

  def __contains__(self, key):
    return key in self.index 

  def __len__(self):
    return len(self.index)
  
  def __iter__(self):
    return iter(self.index.values())

  def _make_path(self, fname):
    return os.path.join(self.path, fname)

  def make_entry(self, key):
    '''
      reserve
    '''
    assert key not in self.index
    entry = CacheEntry(key, self.path, 0.0, self.on_notify)
    #FIXME because cache entry is write once. read many.
    self.index[key] = entry
    return entry

  def on_notify(self, entry):
    print 'cache entries: ', len(self.on_memory)
    if entry.key in self.on_memory:
      self.touch(entry)
    else:
      self.push_to_memory(entry)

  def push_to_memory(self, entry):
    if len(self.on_memory) >=  self.on_memory_entry_limit:
      key, purged = self.on_memory.popitem(False) #popping first item
      print 'purged cache life=%f s since %f for %s' % (time.time() - purged.last_touch, purged.last_touch, purged.key)
      purged.move_to_disk()
    entry.touch()
    print "putting entry %s" % (entry.key)
    assert entry.datafile
    assert entry.last_touch > 1.0
    self.on_memory[entry.key] = entry

  def touch(self, entry):
    #revoke
    x = self.on_memory.pop(entry.key)
    assert x == entry
    # activate it as a new entry
    entry.touch()
    self.on_memory[entry.key] = entry

  def get(self, key):
    e = self.index.get(key, None)
    return e

  def pop(self, key):
    return self.index.pop(key)
    
  def load_index(self):
    p = self._make_path(self.index_filename)

    no_index = False
    try:
      f = open(p, 'rb')
    except IOError:
      f = None
    if f:
      try:
        self.index = pickle.load(f)
      except Exception:
        no_index = True
      finally:
        f.close()
    else:
      no_index = True

    if no_index:
      self.index = {}
      self.save_index()

  def save_entries(self):
    print 'Cache.save_entries'
    for entry in self.index.itervalues():
      if entry.datafile:
        entry.move_to_disk()

  def save_index(self):
    print 'Cache.save_index'
    p = self._make_path(self.index_filename)
    for entry in self.index.itervalues():
      entry.abort()
    with open(p, 'wb') as f:
      pickle.dump(self.index, f)

  def fix(self):
    to_delete = []
    for k, v in self.index.items():
      p = self._make_path(v)
      if not os.access(p, os.F_OK | os.R_OK | os.W_OK):
        to_delete.append(k)
    for k in to_delete:
      del self.index[k]
    self.save_index()

  def scan(self):
    '''Wrong idea: cannot regenerate the url from the file name...'''
    #return os.path.join(self.path, fname)
    for fname in os.listdir(self.path):
      if fname == self.index_filename:
        continue

  def html_index(self):
    count = len(self.index)
    x = []
    for key, ce in self.on_memory.iteritems():
      x.append('<li>[%s ]: %s</li>\n'%(key, ce.status()))
    frag_mem = '<ol>%s</ol>'%(''.join(x))

    y = []
    for key, ce in self.index.iteritems():
      y.append('<li>[%s ]: %s</li>\n'%(key, ce.status()))
    frag_index = '<ol>%s</ol>'%(''.join(y))
    html = '''<html><body>
      <p>count:%s</p>
      %s
      <hr />
      %s
      </body></html>'''
    return html%(count, frag_mem, frag_index)
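
# A minimal sketch (not the Cache/CacheEntry API above) of the OrderedDict-as-LRU
# pattern that push_to_memory() and touch() rely on: popitem(last=False) purges
# the oldest entry, while pop() followed by reinsertion moves a key to the
# "most recently used" end. The SimpleLRU name and limit parameter are
# illustrative assumptions.
from collections import OrderedDict

class SimpleLRU(object):
  def __init__(self, limit):
    self.limit = limit
    self.entries = OrderedDict()

  def put(self, key, value):
    if key in self.entries:
      self.entries.pop(key)             # revoke, then reinsert as the newest entry
    elif len(self.entries) >= self.limit:
      self.entries.popitem(last=False)  # purge the least recently used entry
    self.entries[key] = value

  def get(self, key):
    value = self.entries.pop(key)       # touch: move the key to the newest position
    self.entries[key] = value
    return value

lru = SimpleLRU(2)
lru.put('a', 1)
lru.put('b', 2)
lru.get('a')
lru.put('c', 3)                         # evicts 'b', the least recently used key
print lru.entries.keys()                # ['a', 'c']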
Exemple #27
0
class Inspector(QWidgetComponentFactory(uiFile=COMPONENT_UI_FILE)):
	"""
	| Defines the :mod:`sibl_gui.components.core.inspector.inspector` Component Interface class.
	| It offers a large preview of the currently inspected Ibl Set and a way to navigate
		into the currently selected Database Collection.
	"""

	# Custom signals definitions.
	refreshNodes = pyqtSignal()
	"""
	This signal is emitted by the :class:`Inspector` class when the :obj:`Inspector.model` class property Model
	nodes need to be refreshed. ( pyqtSignal )
	"""

	uiRefresh = pyqtSignal()
	"""
	This signal is emitted by the :class:`Inspector` class when the Ui needs to be refreshed. ( pyqtSignal )
	"""

	uiClear = pyqtSignal()
	"""
	This signal is emitted by the :class:`Inspector` class when the Ui needs to be cleared. ( pyqtSignal )
	"""

	def __init__(self, parent=None, name=None, *args, **kwargs):
		"""
		Initializes the class.

		:param parent: Object parent.
		:type parent: QObject
		:param name: Component name.
		:type name: unicode
		:param \*args: Arguments.
		:type \*args: \*
		:param \*\*kwargs: Keywords arguments.
		:type \*\*kwargs: \*\*
		"""

		LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

		super(Inspector, self).__init__(parent, name, *args, **kwargs)

		# --- Setting class attributes. ---
		self.deactivatable = False

		self.__uiResourcesDirectory = "resources"
		self.__uiPreviousImage = "Previous.png"
		self.__uiNextImage = "Next.png"
		self.__uiLoadingImage = "Loading.png"
		self.__dockArea = 2
		self.__listViewIconSize = 30

		self.__engine = None
		self.__settings = None
		self.__settingsSection = None

		self.__preferencesManager = None
		self.__iblSetsOutliner = None

		self.__sectionsFileParsersCache = None

		self.__model = None
		self.__view = None

		self.__thumbnailsSize = "Special1"

		self.__activeIblSet = None
		self.__inspectorPlates = None

		self.__noPreviewImageText = """
								<center>
								<table border="0" bordercolor="" cellpadding="0" cellspacing="16">
									<tr>
										<td>
											<img src="{0}">
										</td>
										<td>
											<p><b>Preview Image is unavailable!</b></p>
											What now?
											<ul>
												<li>Check for an updated set on <b>HDRLabs</b> at
												<a href="http://www.hdrlabs.com/sibl/archive.html">
												<span style="text-decoration: underline; color:#e0e0e0;">
												http://www.hdrlabs.com/sibl/archive.html</span></a>.</li>
												<li>Contact <b>{1}</b> for an updated Ibl Set: <a href="{2}">
												<span style="text-decoration: underline; color:#e0e0e0;">{2}</span>
												</a></li>
												<li>Resize the background image to 600x300 pixels.<br/>
												Save it as a jpeg in your set directory.<br/>
												Register it in the ".ibl" file header using the "PREVIEWfile" attribute.
												</li>
											</ul>
										</td>
									</tr>
								</table>
								</center>
								"""
		self.__noActiveIblSetText = """
								<center>
								<table border="0" bordercolor="" cellpadding="0" cellspacing="16">
									<tr>
										<td>
											<img src="{0}">
										</td>
										<td>
											<p><b>No Ibl Set to inspect!</b></p>
											Please add some Ibl Sets to the Database or select a non-empty Collection!
										</td>
									</tr>
								</table>
								</center>
								"""
		self.__activeIblSetToolTipText = """
								<p><b>{0}</b></p>
								<p><b>Author: </b>{1}<br>
								<b>Location: </b>{2}<br>
								<b>Shot Date: </b>{3}<br>
								<b>Comment: </b>{4}</p>
								"""

		self.__lightLabelRadius = 4
		self.__lightLabelTextOffset = 24
		self.__lightLabelTextMargin = 16
		self.__lightLabelTextHeight = 14
		self.__lightLabelTextFont = "Helvetica"
		self.__unnamedLightName = "Unnamed_Light"

		self.__pixmapPlaceholder = None

	#******************************************************************************************************************
	#***	Attributes properties.
	#******************************************************************************************************************
	@property
	def uiResourcesDirectory(self):
		"""
		Property for **self.__uiResourcesDirectory** attribute.

		:return: self.__uiResourcesDirectory.
		:rtype: unicode
		"""

		return self.__uiResourcesDirectory

	@uiResourcesDirectory.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uiResourcesDirectory(self, value):
		"""
		Setter for **self.__uiResourcesDirectory** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "uiResourcesDirectory"))

	@uiResourcesDirectory.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uiResourcesDirectory(self):
		"""
		Deleter for **self.__uiResourcesDirectory** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "uiResourcesDirectory"))

	@property
	def uiPreviousImage(self):
		"""
		Property for **self.__uiPreviousImage** attribute.

		:return: self.__uiPreviousImage.
		:rtype: unicode
		"""

		return self.__uiPreviousImage

	@uiPreviousImage.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uiPreviousImage(self, value):
		"""
		Setter for **self.__uiPreviousImage** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "uiPreviousImage"))

	@uiPreviousImage.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uiPreviousImage(self):
		"""
		Deleter for **self.__uiPreviousImage** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "uiPreviousImage"))

	@property
	def uiNextImage(self):
		"""
		Property for **self.__uiNextImage** attribute.

		:return: self.__uiNextImage.
		:rtype: unicode
		"""

		return self.__uiNextImage

	@uiNextImage.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uiNextImage(self, value):
		"""
		Setter for **self.__uiNextImage** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "uiNextImage"))

	@uiNextImage.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uiNextImage(self):
		"""
		Deleter for **self.__uiNextImage** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "uiNextImage"))

	@property
	def uiLoadingImage(self):
		"""
		Property for **self.__uiLoadingImage** attribute.

		:return: self.__uiLoadingImage.
		:rtype: unicode
		"""

		return self.__uiLoadingImage

	@uiLoadingImage.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uiLoadingImage(self, value):
		"""
		Setter for **self.__uiLoadingImage** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "uiLoadingImage"))

	@uiLoadingImage.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uiLoadingImage(self):
		"""
		Deleter for **self.__uiLoadingImage** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "uiLoadingImage"))

	@property
	def dockArea(self):
		"""
		Property for **self.__dockArea** attribute.

		:return: self.__dockArea.
		:rtype: int
		"""

		return self.__dockArea

	@dockArea.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def dockArea(self, value):
		"""
		Setter for **self.__dockArea** attribute.

		:param value: Attribute value.
		:type value: int
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "dockArea"))

	@dockArea.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def dockArea(self):
		"""
		Deleter for **self.__dockArea** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "dockArea"))

	@property
	def listViewIconSize(self):
		"""
		Property for **self.__listViewIconSize** attribute.

		:return: self.__listViewIconSize.
		:rtype: int
		"""

		return self.__listViewIconSize

	@listViewIconSize.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def listViewIconSize(self, value):
		"""
		Setter for **self.__listViewIconSize** attribute.

		:param value: Attribute value.
		:type value: int
		"""

		if value is not None:
			assert type(value) is int, "'{0}' attribute: '{1}' type is not 'int'!".format("listViewIconSize", value)
			assert value > 0, "'{0}' attribute: '{1}' needs to be strictly positive!".format("listViewIconSize", value)
		self.__listViewIconSize = value

	@listViewIconSize.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def listViewIconSize(self):
		"""
		Deleter for **self.__listViewIconSize** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "listViewIconSize"))

	@property
	def engine(self):
		"""
		Property for **self.__engine** attribute.

		:return: self.__engine.
		:rtype: QObject
		"""

		return self.__engine

	@engine.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def engine(self, value):
		"""
		Setter for **self.__engine** attribute.

		:param value: Attribute value.
		:type value: QObject
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "engine"))

	@engine.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def engine(self):
		"""
		Deleter for **self.__engine** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "engine"))

	@property
	def iblSetsOutliner(self):
		"""
		Property for **self.__iblSetsOutliner** attribute.

		:return: self.__iblSetsOutliner.
		:rtype: QWidget
		"""

		return self.__iblSetsOutliner

	@iblSetsOutliner.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def iblSetsOutliner(self, value):
		"""
		Setter for **self.__iblSetsOutliner** attribute.

		:param value: Attribute value.
		:type value: QWidget
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "iblSetsOutliner"))

	@iblSetsOutliner.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def iblSetsOutliner(self):
		"""
		Deleter for **self.__iblSetsOutliner** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "iblSetsOutliner"))

	@property
	def sectionsFileParsersCache(self):
		"""
		Property for **self.__sectionsFileParsersCache** attribute.

		:return: self.__sectionsFileParsersCache.
		:rtype: Cache
		"""

		return self.__sectionsFileParsersCache

	@sectionsFileParsersCache.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def sectionsFileParsersCache(self, value):
		"""
		Setter for **self.__sectionsFileParsersCache** attribute.

		:param value: Attribute value.
		:type value: Cache
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "sectionsFileParsersCache"))

	@sectionsFileParsersCache.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def sectionsFileParsersCache(self):
		"""
		Deleter for **self.__sectionsFileParsersCache** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "sectionsFileParsersCache"))

	@property
	def model(self):
		"""
		Property for **self.__model** attribute.

		:return: self.__model.
		:rtype: PlatesModel
		"""

		return self.__model

	@model.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def model(self, value):
		"""
		Setter for **self.__model** attribute.

		:param value: Attribute value.
		:type value: PlatesModel
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "model"))

	@model.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def model(self):
		"""
		Deleter for **self.__model** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "model"))

	@property
	def view(self):
		"""
		Property for **self.__view** attribute.

		:return: self.__view.
		:rtype: QWidget
		"""

		return self.__view

	@view.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def view(self, value):
		"""
		Setter for **self.__view** attribute.

		:param value: Attribute value.
		:type value: QWidget
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "view"))

	@view.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def view(self):
		"""
		Deleter for **self.__view** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "view"))

	@property
	def thumbnailsSize(self):
		"""
		Property for **self.__thumbnailsSize** attribute.

		:return: self.__thumbnailsSize.
		:rtype: unicode
		"""

		return self.__thumbnailsSize

	@thumbnailsSize.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def thumbnailsSize(self, value):
		"""
		Setter for **self.__thumbnailsSize** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format("thumbnailsSize",
																								  value)
		self.__thumbnailsSize = value

	@thumbnailsSize.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def thumbnailsSize(self):
		"""
		Deleter for **self.__thumbnailsSize** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "thumbnailsSize"))

	@property
	def activeIblSet(self):
		"""
		Property for **self.__activeIblSet** attribute.

		:return: self.__activeIblSet.
		:rtype: IblSet
		"""

		return self.__activeIblSet

	@activeIblSet.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def activeIblSet(self, value):
		"""
		Setter for **self.__activeIblSet** attribute.

		:param value: Attribute value.
		:type value: IblSet
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "activeIblSet"))

	@activeIblSet.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def activeIblSet(self):
		"""
		Deleter for **self.__activeIblSet** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "activeIblSet"))

	@property
	def inspectorPlates(self):
		"""
		Property for **self.__inspectorPlates** attribute.

		:return: self.__inspectorPlates.
		:rtype: dict
		"""

		return self.__inspectorPlates

	@inspectorPlates.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def inspectorPlates(self, value):
		"""
		Setter for **self.__inspectorPlates** attribute.

		:param value: Attribute value.
		:type value: dict
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "inspectorPlates"))

	@inspectorPlates.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def inspectorPlates(self):
		"""
		Deleter for **self.__inspectorPlates** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "inspectorPlates"))

	@property
	def noPreviewImageText(self):
		"""
		Property for **self.__noPreviewImageText** attribute.

		:return: self.__noPreviewImageText.
		:rtype: unicode
		"""

		return self.__noPreviewImageText

	@noPreviewImageText.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def noPreviewImageText(self, value):
		"""
		Setter for **self.__noPreviewImageText** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "noPreviewImageText"))

	@noPreviewImageText.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def noPreviewImageText(self):
		"""
		Deleter for **self.__noPreviewImageText** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "noPreviewImageText"))

	@property
	def noActiveIblSetText(self):
		"""
		Property for **self.__noActiveIblSetText** attribute.

		:return: self.__noActiveIblSetText.
		:rtype: unicode
		"""

		return self.__noActiveIblSetText

	@noActiveIblSetText.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def noActiveIblSetText(self, value):
		"""
		Setter for **self.__noActiveIblSetText** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "noActiveIblSetText"))

	@noActiveIblSetText.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def noActiveIblSetText(self):
		"""
		Deleter for **self.__noActiveIblSetText** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "noActiveIblSetText"))

	@property
	def activeIblSetToolTipText(self):
		"""
		Property for **self.__activeIblSetToolTipText** attribute.

		:return: self.__activeIblSetToolTipText.
		:rtype: unicode
		"""

		return self.__activeIblSetToolTipText

	@activeIblSetToolTipText.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def activeIblSetToolTipText(self, value):
		"""
		Setter for **self.__activeIblSetToolTipText** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "activeIblSetToolTipText"))

	@activeIblSetToolTipText.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def activeIblSetToolTipText(self):
		"""
		Deleter for **self.__activeIblSetToolTipText** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "activeIblSetToolTipText"))

	@property
	def lightLabelRadius(self):
		"""
		Property for **self.__lightLabelRadius** attribute.

		:return: self.__lightLabelRadius.
		:rtype: int
		"""

		return self.__lightLabelRadius

	@lightLabelRadius.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelRadius(self, value):
		"""
		Setter for **self.__lightLabelRadius** attribute.

		:param value: Attribute value.
		:type value: int
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "lightLabelRadius"))

	@lightLabelRadius.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelRadius(self):
		"""
		Deleter for **self.__lightLabelRadius** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "lightLabelRadius"))

	@property
	def lightLabelTextOffset(self):
		"""
		Property for **self.__lightLabelTextOffset** attribute.

		:return: self.__lightLabelTextOffset.
		:rtype: int
		"""

		return self.__lightLabelTextOffset

	@lightLabelTextOffset.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelTextOffset(self, value):
		"""
		Setter for **self.__lightLabelTextOffset** attribute.

		:param value: Attribute value.
		:type value: int
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "lightLabelTextOffset"))

	@lightLabelTextOffset.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelTextOffset(self):
		"""
		Deleter for **self.__lightLabelTextOffset** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "lightLabelTextOffset"))

	@property
	def lightLabelTextMargin(self):
		"""
		Property for **self.__lightLabelTextMargin** attribute.

		:return: self.__lightLabelTextMargin.
		:rtype: int
		"""

		return self.__lightLabelTextMargin

	@lightLabelTextMargin.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelTextMargin(self, value):
		"""
		Setter for **self.__lightLabelTextMargin** attribute.

		:param value: Attribute value.
		:type value: int
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "lightLabelTextMargin"))

	@lightLabelTextMargin.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelTextMargin(self):
		"""
		Deleter for **self.__lightLabelTextMargin** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "lightLabelTextMargin"))

	@property
	def lightLabelTextHeight(self):
		"""
		Property for **self.__lightLabelTextHeight** attribute.

		:return: self.__lightLabelTextHeight.
		:rtype: int
		"""

		return self.__lightLabelTextHeight

	@lightLabelTextHeight.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelTextHeight(self, value):
		"""
		Setter for **self.__lightLabelTextHeight** attribute.

		:param value: Attribute value.
		:type value: int
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "lightLabelTextHeight"))

	@lightLabelTextHeight.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelTextHeight(self):
		"""
		Deleter for **self.__lightLabelTextHeight** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "lightLabelTextHeight"))

	@property
	def lightLabelTextFont(self):
		"""
		Property for **self.__lightLabelTextFont** attribute.

		:return: self.__lightLabelTextFont.
		:rtype: unicode
		"""

		return self.__lightLabelTextFont

	@lightLabelTextFont.setter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelTextFont(self, value):
		"""
		Setter for **self.__lightLabelTextFont** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is read only!".format(self.__class__.__name__, "lightLabelTextFont"))

	@lightLabelTextFont.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def lightLabelTextFont(self):
		"""
		Deleter for **self.__lightLabelTextFont** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "lightLabelTextFont"))

	@property
	def unnamedLightName(self):
		"""
		Property for **self.__unnamedLightName** attribute.

		:return: self.__unnamedLightName.
		:rtype: unicode
		"""

		return self.__unnamedLightName

	@unnamedLightName.setter
	@foundations.exceptions.handleExceptions(AssertionError)
	def unnamedLightName(self, value):
		"""
		Setter for **self.__unnamedLightName** attribute.

		:param value: Attribute value.
		:type value: unicode
		"""

		if value is not None:
			assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
				"unnamedLightName", value)
		self.__unnamedLightName = value

	@unnamedLightName.deleter
	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def unnamedLightName(self):
		"""
		Deleter for **self.__unnamedLightName** attribute.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' attribute is not deletable!".format(self.__class__.__name__, "unnamedLightName"))

	#******************************************************************************************************************
	#***	Class methods.
	#******************************************************************************************************************
	def activate(self, engine):
		"""
		Activates the Component.

		:param engine: Engine to attach the Component to.
		:type engine: QObject
		:return: Method success.
		:rtype: bool
		"""

		LOGGER.debug("> Activating '{0}' Component.".format(self.__class__.__name__))

		self.__uiResourcesDirectory = os.path.join(os.path.dirname(__file__), self.__uiResourcesDirectory)
		self.__engine = engine
		self.__settings = self.__engine.settings
		self.__settingsSection = self.name

		self.__preferencesManager = self.__engine.componentsManager["factory.preferencesManager"]
		self.__iblSetsOutliner = self.__engine.componentsManager["core.iblSetsOutliner"]

		self.activated = True
		return True

	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def deactivate(self):
		"""
		Deactivates the Component.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' Component cannot be deactivated!".format(self.__class__.__name__, self.__name))

	def initializeUi(self):
		"""
		Initializes the Component ui.

		:return: Method success.
		:rtype: bool
		"""

		LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__))

		self.__pixmapPlaceholder = \
			sibl_gui.ui.common.getPixmap(os.path.join(self.__uiResourcesDirectory, self.__uiLoadingImage),
										 asynchronousLoading=False)

		self.__sectionsFileParsersCache = foundations.cache.Cache()

		self.__model = PlatesModel()

		self.Plates_listView.setParent(None)
		self.Plates_listView = Plates_QListView(self, self.__model)
		self.Plates_listView.setObjectName("Plates_listView")
		self.Plates_frame_gridLayout.addWidget(self.Plates_listView, 0, 1)
		self.__view = self.Plates_listView
		self.__view.storeModelSelection = self.__view.restoreModelSelection = lambda: True

		self.Previous_Ibl_Set_pushButton.setIcon(
			QIcon(os.path.join(self.__uiResourcesDirectory, self.__uiPreviousImage)))
		self.Next_Ibl_Set_pushButton.setIcon(QIcon(os.path.join(self.__uiResourcesDirectory, self.__uiNextImage)))
		self.Previous_Plate_pushButton.setIcon(QIcon(os.path.join(self.__uiResourcesDirectory, self.__uiPreviousImage)))
		self.Next_Plate_pushButton.setIcon(QIcon(os.path.join(self.__uiResourcesDirectory, self.__uiNextImage)))

		self.Plates_frame.hide()
		self.Inspector_Options_groupBox.hide()

		self.__Inspector_DockWidget_setUi()

		self.Inspector_Overall_frame.setContextMenuPolicy(Qt.ActionsContextMenu)
		self.__Inspector_Overall_frame_addActions()

		# Signals / Slots.
		self.__engine.imagesCaches.QIcon.contentAdded.connect(self.__view.viewport().update)
		self.__engine.imagesCaches.QPixmap.contentAdded.connect(self.__engine_imagesCaches_QPixmap__contentAdded)
		self.Plates_listView.selectionModel().selectionChanged.connect(self.__view_selectionModel__selectionChanged)
		self.__iblSetsOutliner.model.modelReset.connect(self.__iblSetsOutliner__modelReset)
		self.__engine.fileSystemEventsManager.fileChanged.connect(self.__engine_fileSystemEventsManager__fileChanged)
		for view in self.__iblSetsOutliner.views:
			view.selectionModel().selectionChanged.connect(self.__iblSetsOutliner_view_selectionModel__selectionChanged)
		self.Previous_Ibl_Set_pushButton.clicked.connect(self.__Previous_Ibl_Set_pushButton__clicked)
		self.Next_Ibl_Set_pushButton.clicked.connect(self.__Next_Ibl_Set_pushButton__clicked)
		self.Previous_Plate_pushButton.clicked.connect(self.__Previous_Plate_pushButton__clicked)
		self.Next_Plate_pushButton.clicked.connect(self.__Next_Plate_pushButton__clicked)
		self.Image_label.linkActivated.connect(self.__Image_label__linkActivated)
		self.refreshNodes.connect(self.__model__refreshNodes)
		self.uiRefresh.connect(self.__Inspector_DockWidget_refreshUi)
		self.uiClear.connect(self.__Inspector_DockWidget_clearUi)

		self.initializedUi = True
		return True

	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def uninitializeUi(self):
		"""
		Uninitializes the Component ui.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' Component ui cannot be uninitialized!".format(self.__class__.__name__, self.name))

	def addWidget(self):
		"""
		Adds the Component Widget to the engine.

		:return: Method success.
		:rtype: bool
		"""

		LOGGER.debug("> Adding '{0}' Component Widget.".format(self.__class__.__name__))

		self.__engine.addDockWidget(Qt.DockWidgetArea(self.__dockArea), self)

		return True

	@foundations.exceptions.handleExceptions(foundations.exceptions.ProgrammingError)
	def removeWidget(self):
		"""
		Removes the Component Widget from the engine.
		"""

		raise foundations.exceptions.ProgrammingError(
			"{0} | '{1}' Component Widget cannot be removed!".format(self.__class__.__name__, self.name))

	def __Inspector_DockWidget_setUi(self):
		"""
		Sets the :mod:`sibl_gui.components.core.inspector.inspector` Component Widget ui.
		"""

		if self.__activeIblSet:
			self.Title_label.setText("<center><b>{0}</b> - {1}</center>".format(self.__activeIblSet.title,
																				self.__activeIblSet.location))

			previewAvailable = False
			if foundations.common.pathExists(self.__activeIblSet.previewImage):
				pixmap = sibl_gui.ui.common.getPixmap(self.__activeIblSet.previewImage)
				previewAvailable = True
			else:
				if foundations.common.pathExists(self.__activeIblSet.backgroundImage):
					pixmap = sibl_gui.ui.common.getPixmap(self.__activeIblSet.backgroundImage,
														  size=self.__thumbnailsSize,
														  placeholder=self.__pixmapPlaceholder)
					previewAvailable = True

			if previewAvailable:
				self.Image_label.setPixmap(pixmap)
				self.__drawActiveIblSetOverlay()
			else:
				self.Image_label.setText(self.__noPreviewImageText.format(
					sibl_gui.ui.common.filterImagePath(self.__activeIblSet.icon),
					self.__activeIblSet.author,
					self.__activeIblSet.link))

			self.Image_label.setToolTip(self.__activeIblSetToolTipText.format(
				self.__activeIblSet.title,
				self.__activeIblSet.author or Constants.nullObject,
				self.__activeIblSet.location or Constants.nullObject,
				sibl_gui.ui.common.getFormattedShotDate(self.__activeIblSet.date,
													   self.__activeIblSet.time) or Constants.nullObject,
				self.__activeIblSet.comment or Constants.nullObject))

			self.Details_label.setText("<center><b>Comment:</b> {0}</center>".format(self.__activeIblSet.comment))

			self.Plates_frame.setVisible(bool(self.__inspectorPlates))
		else:
			self.__Inspector_DockWidget_clearUi()

	def __Inspector_DockWidget_refreshUi(self):
		"""
		Sets the :mod:`sibl_gui.components.core.inspector.inspector` Component Widget ui.
		"""

		self.__Inspector_DockWidget_setUi()

	def __Inspector_DockWidget_clearUi(self):
		"""
		Clears the :mod:`sibl_gui.components.core.inspector.inspector` Component Widget ui.
		"""

		self.Title_label.setText(QString())
		self.Image_label.setText(self.__noActiveIblSetText.format(sibl_gui.ui.common.filterImagePath("")))
		self.Image_label.setToolTip(QString())
		self.Details_label.setText(QString())

		self.Plates_frame.hide()

	def __Inspector_Overall_frame_addActions(self):
		"""
		Sets the **Inspector_Overall_frame** actions.
		"""

		pass

	def __model__refreshNodes(self):
		"""
		Refreshes the **Plates_listView** Model nodes.
		"""

		self.setPlates()

	def __view_selectionModel__selectionChanged(self, selectedItems, deselectedItems):
		"""
		Defines the slot triggered by **Plates_listView** when Model selection has changed.

		:param selectedItems: Selected items.
		:type selectedItems: QItemSelection
		:param deselectedItems: Deselected items.
		:type deselectedItems: QItemSelection
		"""

		index = foundations.common.getFirstItem(selectedItems.indexes())
		node = index and self.__model.getNode(index) or None
		if not node:
			return

		if node.family == "Plate":
			self.Image_label.setPixmap(sibl_gui.ui.common.getPixmap(node.plate.previewImage, asynchronousLoading=False))
		else:
			self.uiRefresh.emit()

	def __engine_fileSystemEventsManager__fileChanged(self, file):
		"""
		Defines the slot triggered by the **fileSystemEventsManager** when a file is changed.

		:param file: File changed.
		:type file: unicode
		"""

		file = foundations.strings.toString(file)
		if file in self.__sectionsFileParsersCache:
			LOGGER.debug("> Removing modified '{0}' file from cache.".format(file))
			self.__sectionsFileParsersCache.removeContent(file)

			if not self.__activeIblSet:
				return

			if self.__activeIblSet.path == file:
				self.__setActiveIblSet()
				self.uiRefresh.emit()

	def __engine_imagesCaches_QPixmap__contentAdded(self, paths):
		"""
		Defines the slot triggered by the **QPixmap** images cache when content is added.

		:param paths: Added content.
		:type paths: list
		"""

		if not self.__activeIblSet:
			return

		if foundations.common.getFirstItem(paths) in (self.__activeIblSet.previewImage,
													  self.__activeIblSet.backgroundImage):
			self.__Inspector_DockWidget_setUi()

	def __iblSetsOutliner__modelReset(self):
		"""
		Defines the slot triggered by :mod:`sibl_gui.components.core.iblSetsOutliner.iblSetsOutliner`
		Component Model when changed.
		"""

		self.__setActiveIblSet()

	def __iblSetsOutliner_view_selectionModel__selectionChanged(self, selectedItems, deselectedItems):
		"""
		Defines the slot triggered by :mod:`sibl_gui.components.core.iblSetsOutliner.iblSetsOutliner`
		Component Model selection when changed.

		:param selectedItems: Selected items.
		:type selectedItems: QItemSelection
		:param deselectedItems: Deselected items.
		:type deselectedItems: QItemSelection
		"""

		self.__setActiveIblSet()

		self.__setActiveIblSetPlates()
		self.refreshNodes.emit()

		if self.__activeIblSet:
			self.uiRefresh.emit()
		else:
			self.uiClear.emit()

	def __Previous_Ibl_Set_pushButton__clicked(self, checked):
		"""
		Defines the slot triggered by **Previous_Ibl_Set_pushButton** Widget when clicked.

		:param checked: Checked state.
		:type checked: bool
		"""

		self.loopThroughIblSets(True)

	def __Next_Ibl_Set_pushButton__clicked(self, checked):
		"""
		Defines the slot triggered by **Next_Ibl_Set_pushButton** Widget when clicked.

		:param checked: Checked state.
		:type checked: bool
		"""

		self.loopThroughIblSets()

	def __Previous_Plate_pushButton__clicked(self, checked):
		"""
		Defines the slot triggered by **Previous_Plate_pushButton** Widget when clicked.

		:param checked: Checked state.
		:type checked: bool
		"""

		self.loopThroughPlates(True)

	def __Next_Plate_pushButton__clicked(self, checked):
		"""
		Defines the slot triggered by **Next_Plate_pushButton** Widget when clicked.

		:param checked: Checked state.
		:type checked: bool
		"""

		self.loopThroughPlates()

	def __Image_label__linkActivated(self, url):
		"""
		Defines the slot triggered by **Image_label** Widget when a link is clicked.

		:param url: Url to explore.
		:type url: QString
		"""

		QDesktopServices.openUrl(QUrl(url))

	def __setActiveIblSet(self):
		"""
		Sets the :mod:`sibl_gui.components.core.inspector.inspector` Component Ibl Set.
		"""

		selectedIblSets = self.__iblSetsOutliner.getSelectedIblSets()
		self.__activeIblSet = foundations.common.getFirstItem(selectedIblSets)
		if not self.__activeIblSet:
			rootNode = self.__iblSetsOutliner.model.rootNode
			childNode = foundations.common.getFirstItem(rootNode.children)
			self.__activeIblSet = childNode.databaseItem if childNode is not None else None
		self.__activeIblSet and self.__setActiveIblSetParser()

	def __setActiveIblSetParser(self):
		"""
		Sets the :mod:`sibl_gui.components.core.inspector.inspector` Component Ibl Set parser.
		"""

		if foundations.common.pathExists(self.__activeIblSet.path):
			LOGGER.debug("> Parsing Inspector Ibl Set file: '{0}'.".format(self.__activeIblSet))

			if not self.__sectionsFileParsersCache.getContent(self.__activeIblSet.path):
				sectionsFileParser = SectionsFileParser(self.__activeIblSet.path)
				sectionsFileParser.parse()
				self.__sectionsFileParsersCache.addContent(**{self.__activeIblSet.path: sectionsFileParser})

	@foundations.exceptions.handleExceptions(foundations.exceptions.FileExistsError)
	def __setActiveIblSetPlates(self):
		"""
		Sets the Plates from the :mod:`sibl_gui.components.core.inspector.inspector` Component Ibl Set.
		"""

		path = self.__activeIblSet.path
		if not foundations.common.pathExists(path):
			raise foundations.exceptions.FileExistsError(
				"{0} | Exception raised while retrieving Plates: '{1}' Ibl Set file doesn't exists!".format(
					self.__class__.__name__, self.__activeIblSet.title))

		sectionsFileParser = self.__sectionsFileParsersCache.getContent(path)
		self.__inspectorPlates = OrderedDict()
		for section in sectionsFileParser.sections:
			if re.search(r"Plate\d+", section):
				self.__inspectorPlates[section] = \
					Plate(
						name=foundations.strings.getSplitextBasename(sectionsFileParser.getValue("PLATEfile", section)),
						icon=os.path.normpath(os.path.join(os.path.dirname(self.__activeIblSet.path),
														   sectionsFileParser.getValue("PLATEthumb", section))),
						previewImage=os.path.normpath(os.path.join(os.path.dirname(self.__activeIblSet.path),
																   sectionsFileParser.getValue("PLATEpreview",
																							   section))),
						image=os.path.normpath(os.path.join(os.path.dirname(self.__activeIblSet.path),
															sectionsFileParser.getValue("PLATEfile", section))))

	@foundations.exceptions.handleExceptions(foundations.exceptions.ExecutionError, ValueError)
	def __drawActiveIblSetOverlay(self):
		"""
		Draws an overlay on the :obj:`Inspector.Image_label` Widget.
		"""

		painter = QPainter(self.Image_label.pixmap())
		painter.setRenderHints(QPainter.Antialiasing)

		iblSetPath = self.__activeIblSet.path
		sectionsFileParser = self.__sectionsFileParsersCache.getContent(iblSetPath)
		if sectionsFileParser is None:
			raise foundations.exceptions.ExecutionError(
				"'{0}' Ibl Set file 'SectionsFileParser' instance not found!".format(iblSetPath))

		for section in sectionsFileParser.sections:
			if section == "Sun":
				self.__drawLightLabel(painter,
									  Light(name="Sun",
											color=[int(value) for value in sectionsFileParser.getValue(
												"SUNcolor", section).split(",")],
											uCoordinate=float(sectionsFileParser.getValue("SUNu", section)),
											vCoordinate=float(sectionsFileParser.getValue("SUNv", section))))

			elif re.search(r"Light\d+", section):
				self.__drawLightLabel(painter, Light(name=sectionsFileParser.getValue(
					"LIGHTname", section) or self.__unnamedLightName,
													 color=[int(value) for value in sectionsFileParser.getValue(
														 "LIGHTcolor", section).split(",")],
													 uCoordinate=float(
														 sectionsFileParser.getValue("LIGHTu", section)),
													 vCoordinate=float(
														 sectionsFileParser.getValue("LIGHTv", section))))

		painter.end()

	def __drawLightLabel(self, painter, light):
		"""
		Draws a light label on given QPainter.

		:param painter: QPainter.
		:type painter: QPainter
		:param light: Light.
		:type light: Light
		"""

		width = painter.window().width()
		height = painter.window().height()

		lightColorRed, lightColorGreen, lightColorBlue = light.color

		painter.setBrush(QColor(lightColorRed, lightColorGreen, lightColorBlue, 200))
		painter.setPen(QPen(QBrush(QColor(lightColorRed, lightColorGreen, lightColorBlue, 200)), 2))
		font = QFont(self.__lightLabelTextFont, self.__lightLabelTextHeight)
		font.setBold(True)
		painter.setFont(font)

		pointX = int(light.uCoordinate * width)
		pointY = int(light.vCoordinate * height)

		textWidth = painter.fontMetrics().width(light.name.title())
		xLabelTextOffset = -(self.__lightLabelTextOffset + textWidth) if \
			pointX + textWidth + self.__lightLabelTextMargin + self.__lightLabelTextOffset > width else \
			self.__lightLabelTextOffset
		yLabelTextOffset = -(self.__lightLabelTextOffset + self.__lightLabelTextHeight) if \
			pointY - (self.__lightLabelTextHeight + self.__lightLabelTextMargin + self.__lightLabelTextOffset) < 0 else \
			self.__lightLabelTextOffset
		painter.drawText(pointX + xLabelTextOffset, pointY - yLabelTextOffset, light.name.title())

		painter.drawLine(pointX,
						 pointY,
						 pointX + (xLabelTextOffset + textWidth if xLabelTextOffset < 0 else xLabelTextOffset),
						 pointY - (yLabelTextOffset + self.__lightLabelTextHeight \
									   if yLabelTextOffset < 0 else yLabelTextOffset))

		painter.drawEllipse(QPoint(pointX, pointY), self.__lightLabelRadius, self.__lightLabelRadius)

		painter.setBrush(Qt.NoBrush)
		painter.setPen(QPen(QBrush(QColor(lightColorRed, lightColorGreen, lightColorBlue, 100)), 2))
		painter.drawEllipse(QPoint(pointX, pointY), self.__lightLabelRadius * 3, self.__lightLabelRadius * 3)
		painter.setPen(QPen(QBrush(QColor(lightColorRed, lightColorGreen, lightColorBlue, 50)), 2))
		painter.drawEllipse(QPoint(pointX, pointY), self.__lightLabelRadius * 4, self.__lightLabelRadius * 4)

	def setPlates(self):
		"""
		Sets the Plates Model nodes.

		:return: Method success.
		:rtype: bool
		"""

		LOGGER.debug("> Setting up '{0}' Model!".format("Plates_listView"))

		nodeFlags = attributesFlags = int(Qt.ItemIsSelectable | Qt.ItemIsEnabled)
		rootNode = umbra.ui.nodes.DefaultNode(name="InvisibleRootNode")
		iblSetNode = IblSetNode(self.__activeIblSet,
								name=self.__activeIblSet.title,
								parent=rootNode,
								nodeFlags=nodeFlags,
								attributesFlags=attributesFlags,
								iconPath=self.__activeIblSet.icon)
		iblSetNode.roles[Qt.DisplayRole] = ""

		if not self.__inspectorPlates:
			return False

		for name, plate in self.__inspectorPlates.iteritems():
			plateNode = PlatesNode(plate,
								   name=name,
								   parent=rootNode,
								   nodeFlags=nodeFlags,
								   attributesFlags=attributesFlags)
			plateNode.roles[Qt.DisplayRole] = ""
			plateNode.roles[Qt.DecorationRole] = foundations.common.filterPath(plate.icon)

		self.__model.initializeModel(rootNode)
		return True

	def loopThroughIblSets(self, backward=False):
		"""
		Loops through :mod:`sibl_gui.components.core.iblSetsOutliner.iblSetsOutliner` Component Ibl Sets.

		:param backward: Looping backward.
		:type backward: bool
		:return: Method success.
		:rtype: bool
		"""

		if self.__activeIblSet:
			model = self.__iblSetsOutliner.model

			activeIblSetNode = [node for node in model.rootNode.children if
								node.databaseItem.path == self.__activeIblSet.path]
			activeIblSetNode = foundations.common.getFirstItem(activeIblSetNode)
			if not activeIblSetNode:
				return True

			row = activeIblSetNode.row()

			step = not backward and 1 or -1
			idx = row + step
			if idx < 0:
				idx = model.rootNode.childrenCount() - 1
			elif idx > model.rootNode.childrenCount() - 1:
				idx = 0

			selectionModel = self.__iblSetsOutliner.getActiveView().selectionModel()
			selectionModel.clear()
			selectionModel.setCurrentIndex(model.index(idx), QItemSelectionModel.Select)
		else:
			self.uiClear.emit()
		return True

	def loopThroughPlates(self, backward=False):
		"""
		Loops through :mod:`sibl_gui.components.core.inspector.inspector` Component Ibl Set Plates.

		:param backward: Looping backward.
		:type backward: bool
		:return: Method success.
		:rtype: bool
		"""

		index = foundations.common.getFirstItem(self.Plates_listView.selectedIndexes())
		if index:
			step = not backward and 1 or -1
			idx = index.row() + step
			if idx < 0:
				idx = self.__model.rowCount() - 1
			elif idx > self.__model.rowCount() - 1:
				idx = 0

			selectionModel = self.Plates_listView.selectionModel()
			selectionModel.clear()
			selectionModel.setCurrentIndex(self.__model.index(idx), QItemSelectionModel.Select)
		else:
			self.Plates_listView.setCurrentIndex(self.__model.index(0, 0))
		return True