Example #1
def runPythonExec(script, kwargs =None):
    from nimble.NimbleEnvironment import NimbleEnvironment
    from nimble.data.NimbleResponseData import NimbleResponseData
    from nimble.data.enum.DataKindEnum import DataKindEnum

    try:
        nimble.cmds.undoInfo(openChunk=True)
    except Exception as err:
        return False

    try:
        # Create a new, temporary module in which to run the script
        module = imp.new_module('runExecTempModule')

        # Initialize the script with script inputs
        setattr(module, NimbleEnvironment.REMOTE_KWARGS_KEY, kwargs if kwargs is not None else dict())
        setattr(module, NimbleEnvironment.REMOTE_RESULT_KEY, dict())

        # Executes the script in the new module
        exec_(script, module.__dict__)

        # Find a NimbleScriptBase derived class definition and if it exists, run it to populate the
        # results
        for name,value in Reflection.getReflectionDict(module).iteritems():
            if not inspect.isclass(value):
                continue

            if NimbleScriptBase in value.__bases__:
                getattr(module, name)().run()
                break

        # Retrieve the results object that contains all results set by the execution of the script
        result = getattr(module, NimbleEnvironment.REMOTE_RESULT_KEY)
    except Exception as err:
        logger = Logger('runPythonExec', printOut=True)
        logger.writeError('ERROR: Failed Remote Script Execution', err)
        result = NimbleResponseData(
            kind=DataKindEnum.PYTHON_SCRIPT,
            response=NimbleResponseData.FAILED_RESPONSE,
            error=str(err) )

    # If the result dictionary contains an error key, format the response as a failure
    try:
        errorMessage = ArgsUtils.extract(
            NimbleEnvironment.REMOTE_RESULT_ERROR_KEY, None, result)
        if errorMessage:
            return NimbleResponseData(
                kind=DataKindEnum.PYTHON_SCRIPT,
                response=NimbleResponseData.FAILED_RESPONSE,
                error=errorMessage,
                payload=result)
    except Exception as err:
        pass

    try:
        nimble.cmds.undoInfo(closeChunk=True)
    except Exception as err:
        return False

    return result
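A minimal usage sketch (hypothetical; it assumes this runs in an environment where the nimble package and Maya's nimble.cmds are available and runPythonExec is in scope). The script string reads its injected inputs and writes into the injected result dictionary through the same NimbleEnvironment keys used above:

script = (
    "from nimble.NimbleEnvironment import NimbleEnvironment\n"
    "inputs = globals()[NimbleEnvironment.REMOTE_KWARGS_KEY]\n"
    "globals()[NimbleEnvironment.REMOTE_RESULT_KEY]['doubled'] = 2*inputs.get('value', 0)\n")

result = runPythonExec(script, {'value': 21})
print(result)  # expected {'doubled': 42}, or a NimbleResponseData on failure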
Example #2
class SocketHandler(SocketServer.StreamRequestHandler):
    """A class for..."""

#===================================================================================================
#                                                                                       C L A S S

    SERVICE_UID   = 'test'
    VERBOSE       = False
    WORK_PATH     = '/var/lib/'
    RUN_PATH      = '/var/run/'
    LOG_PATH      = '/var/log/'

#___________________________________________________________________________________________________ __init__
    def __init__(self, request, client_address, server):
        self._log = Logger(self)
        self._log.write('Socket handler created')

        SocketServer.StreamRequestHandler.__init__(self, request, client_address, server)

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: returnResponse
    @property
    def returnResponse(self):
        return getattr(self.__class__, 'RETURN_RESPONSE', False)

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ handle
    def handle(self):
        try:
            data = self.rfile.readline().strip()
            self._log.write('HANDLE: ' + str(data))
            try:
                result = self._respondImpl(JSON.fromString(unquote(data)))
            except Exception as err:
                self._log.writeError('RESPOND FAILURE', err)
                if self.returnResponse:
                    self.wfile.write(JSON.asString({'error':1}))
                return

            if self.returnResponse:
                out = {'error':0}
                if result:
                    out['payload'] = result
                self.wfile.write(out)
        except Exception as err:
            self._log.write('HANDLE FAILURE', err)

        return

#===================================================================================================
#                                                                               P R O T E C T E D

#___________________________________________________________________________________________________ _respondImpl
    def _respondImpl(self, data):
        pass
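A sketch of how such a handler might be used (illustrative only; the subclass name, port, and RETURN_RESPONSE flag are assumptions, and the handler's Logger/JSON dependencies must be importable): subclass it, implement _respondImpl, and serve it with a standard SocketServer TCP server.

import SocketServer  # socketserver on Python 3

class EchoHandler(SocketHandler):
    RETURN_RESPONSE = True  # enables the {'error': 0, 'payload': ...} reply path

    def _respondImpl(self, data):
        # Echo the parsed JSON request back to the caller as the payload
        return data

server = SocketServer.TCPServer(('localhost', 9999), EchoHandler)
server.serve_forever()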
Example #3
class DataFormatConverter(object):
    """A class for converting between various data interchange formats, e.g. XML and JSON."""

#===================================================================================================
#                                                                                       C L A S S

#___________________________________________________________________________________________________ __init__
    def __init__(self):
        """Creates a new instance of ClassTemplate."""
        self._type = None
        self._src  = None
        self._log  = Logger('DataFormatConverter')
        self._path = None

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: propertyName
    @property
    def source(self):
        return self._src

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ load
    def load(self, path, fileType):
        if not os.path.exists(path):
            self._log.write('ERROR: Path does not exist [%s]. Unable to load.' % path)
            return False

        try:
            fh  = codecs.open(path, 'r', 'utf-8')
            res = fh.read()
            fh.close()
            enc = res.encode('utf-8')
            self.loads(enc, fileType)
        except Exception as err:
            self._log.writeError('Failed to load source file [%s].' % path, err)
            return False

        self._path = path
        return True
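Typical use might look like the following sketch (the path and type string are illustrative, and it assumes the loads() method, not shown here, stores the decoded source on the instance):

converter = DataFormatConverter()
if converter.load('/tmp/data.xml', 'xml'):
    print(converter.source)  # the UTF-8 text read from the file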
Example #4
class IncludeCompressor(object):

#===================================================================================================
#                                                                                       C L A S S

    _REMOVE_COMMENT_RE      = re.compile(r'/\*.+\*/', re.DOTALL)
    _REMOVE_COMMENT_LINE_RE = re.compile(r'(^|\n)[\s\t]*//.+(\n|$)')

    JS_TYPE  = 'js'
    CSS_TYPE = 'css'

#___________________________________________________________________________________________________ __init__
    def __init__(self, compileCoffee =False):
        self._log           = Logger('IncludeCompressor')
        self._compileCoffee = compileCoffee

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ compress
    def compress(self, rootPath):
        if not self._fileExists(rootPath):
            return False
        elif os.path.isfile(rootPath):
            return self.compressFile(rootPath)
        else:
            return self.compressPath(rootPath)

#___________________________________________________________________________________________________ compressFile
    def compressFile(self, rootPath, directory =None):
        if not self._fileExists(rootPath):
            return False

        if self._compileCoffee:
            try:
                from pyaid.web.coffeescript.CoffeescriptBuilder import CoffeescriptBuilder
                CoffeescriptBuilder.compileAllOnPath(rootPath, os.path.dirname(rootPath), True)
                self._log.write('Coffeescript compiled.')
            except Exception as err:
                self._log.writeError('Failed to compile coffeescript file.', err)
                return False

        return self._compressFile(rootPath, directory)
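A usage sketch (the path is illustrative): point compress() at either a single include file or a directory root and it dispatches to compressFile() or compressPath() accordingly.

compressor = IncludeCompressor(compileCoffee=True)
if not compressor.compress('/var/www/static'):
    print('Compression failed; see the IncludeCompressor log for details.')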
Example #5
class MarkupTag(object):
    """A class for..."""

#===================================================================================================
#                                                                                       C L A S S

    TAG      = ''

    _TAG_LIST                      = None
    _MARGIN_TOP_STYLE_ATTR_PATTERN = re.compile('margin-top:[^\'";]+')
    _STYLE_ATTR_PATTERN            = re.compile('style=(("[^"]*")|(\'[^\']*\'))')
    _TAG_INSERT_PATTERN            = re.compile('<[^>]+>')

#___________________________________________________________________________________________________ __init__
    def __init__(self, *args, **kwargs):
        """ Creates a new instance of MarkupTag.

            @@@param attributeSource:string
                If specified this will be used as the source attribute data for the tag. For
                parsed tags this will override the attribute data that was supplied in the actual
                tag definition text. However, in the procedural and/or independent cases where no
                attribute definition existed, this will take its place.
        """

        self._processor       = ArgsUtils.get('processor', None, kwargs, args, 0)
        self._block           = ArgsUtils.get('block', None, kwargs, args, 1)
        self._index           = ArgsUtils.get('index', 0, kwargs, args, 2)
        tagName               = ArgsUtils.get('tagName', None, kwargs, args, 3)
        self._procedural      = ArgsUtils.get('procedural', False, kwargs, args, 4)
        attributes            = ArgsUtils.get('attributes', None, kwargs, args, 5)
        self._independent     = ArgsUtils.get('independent', False, kwargs)
        self._attrData        = None
        self._attrsReady      = False
        self._voidTag         = ArgsUtils.get('void', None, kwargs)
        self._leafTag         = ArgsUtils.get('leaf', None, kwargs)
        self._isInsertsTag    = ArgsUtils.get('inserts', None, kwargs)
        self._passthruTag     = ArgsUtils.get('passthru', None, kwargs)
        self._renderOverride  = ArgsUtils.get('renderOverride', None, kwargs)
        self._renderTemplate  = ArgsUtils.get('renderTemplate', None, kwargs)
        self._replacementName = ArgsUtils.get('replacementName', None, kwargs)

        self._classMetadata  = {}
        self._errors         = []
        self._parent         = ArgsUtils.get('parent', None, kwargs)
        self._replacement    = ''
        self._offset         = 0

        self._name           = self.getClassAttr('TAG', '') if tagName is None else tagName.lower()

        if self._independent:
            self._log   = Logger(self)
            self._attrs = AttributeData(
                self,
                ArgsUtils.get('attributeSource', u'', kwargs),
                attributes=attributes)
        else:
            self._log   = self._processor.logger
            start       = self._block.start + (0 if self._procedural else len(self.tagName) + 3)
            self._attrs = AttributeData(
                self,
                ArgsUtils.get('attributeSource', u'', kwargs) if self._procedural else
                    self._processor.source[start:self._block.end-1],
                attributes=attributes)

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: apiLevel
    @property
    def apiLevel(self):
        return 2

#___________________________________________________________________________________________________ GS: primaryAttribute
    @property
    def primaryAttribute(self):
        return self.getClassAttr('PRIMARY_ATTR', None)

#___________________________________________________________________________________________________ GS: isProcedural
    @property
    def isProcedural(self):
        return self._procedural

#___________________________________________________________________________________________________ GS: block
    @property
    def block(self):
        return self._block
    @block.setter
    def block(self, value):
        self._block = value

#___________________________________________________________________________________________________ GS: index
    @property
    def index(self):
        return self._index

#___________________________________________________________________________________________________ GS: processor
    @property
    def processor(self):
        return self._processor

#___________________________________________________________________________________________________ GS: isBlockDisplay
    @property
    def isBlockDisplay(self):
        return self.getClassAttr('BLOCK_DISPLAY', False)

#___________________________________________________________________________________________________ GS: isBlockTag
    @property
    def isBlockTag(self):
        return False

#___________________________________________________________________________________________________ GS: isVoidTag
    @property
    def isVoidTag(self):
        """ Specifies whether or not the tag is a void tag. Void tags render as an empty string and
            are useful for conditional rendering and hierarchical data management."""

        if self._voidTag is None:
            return self.getClassAttr('VOID_TAG', False)

        return self._voidTag

#___________________________________________________________________________________________________ GS: isLeafTag
    @property
    def isLeafTag(self):

        if self._leafTag is None:
            return self.getClassAttr('LEAF_TAG', False)

        return self._leafTag

#___________________________________________________________________________________________________ GS: isPassthruTag
    @property
    def isPassthruTag(self):

        if self._passthruTag is None:
            return self.getClassAttr('PASSTHRU_TAG', False)

        return self._passthruTag

#___________________________________________________________________________________________________ GS: isInsertsTag
    @property
    def isInsertsTag(self):

        if self._isInsertsTag is None:
            return self.getClassAttr('INSERTS_TAG', True)

        return self._isInsertsTag

#___________________________________________________________________________________________________ GS: tagName
    @property
    def tagName(self):
        return self._name

#___________________________________________________________________________________________________ GS: replacement
    @property
    def replacement(self):
        return self._replacement

#___________________________________________________________________________________________________ GS: attrs
    @property
    def attrs(self):
        return self._attrs

#___________________________________________________________________________________________________ GS: renderOffset
    @property
    def renderOffset(self):
        return self._offset

#___________________________________________________________________________________________________ GS: aheadCapPolicy
    @property
    def aheadCapPolicy(self):
        return self.getAttrFromClass('AHEAD_CAP_POLICY')

#___________________________________________________________________________________________________ GS: backCapPolicy
    @property
    def backCapPolicy(self):
        return self.getAttrFromClass('BACK_CAP_POLICY')

#___________________________________________________________________________________________________ GS: parent
    @property
    def parent(self):
        return self._parent
    @parent.setter
    def parent(self, value):
        self._parent = value

#___________________________________________________________________________________________________ GS: renderTemplate
    @property
    def renderTemplate(self):
        if self._renderTemplate is None:
            self._renderTemplate = self.getClassAttr('TEMPLATE', '')
        return self._renderTemplate
    @renderTemplate.setter
    def renderTemplate(self, value):
        self._renderTemplate = value

#___________________________________________________________________________________________________ GS: log
    @property
    def log(self):
        return self._log

#___________________________________________________________________________________________________ GS: replacementName
    @property
    def replacementName(self):
        return self._replacementName

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ getAttributeList
    @classmethod
    def getAttributeList(cls):
        t = TagAttributesEnum

        out = t.THEME + t.ID + t.HTML_CLASS + t.HTML_STYLE + t.HTML_DATA + t.ACCENTED + t.CLEAR + \
              t.GROUP + t.HTML_ATTR
        if cls.getAttrFromClass('PRIMARY_ATTR', None):
            out += t.VALUE
        return out

#___________________________________________________________________________________________________ clone
    def clone(self, tree=True, replacements=None, **kwargs):
        if replacements and self.replacementName:
            if not isinstance(replacements, list):
                replacements = [replacements]

            for r in replacements:
                if r.replacementName == self.replacementName:
                    return r

        return self._cloneImpl(**kwargs)

#___________________________________________________________________________________________________ getNonPassthruRootTag
    def getNonPassthruRootTag(self):
        if self.isPassthruTag:
            return None

        return self

#___________________________________________________________________________________________________ confirmClosed
    def confirmClosed(self):
        return True

#___________________________________________________________________________________________________ useBackground
    def useBackground(self):
        self.attrs.classes.add('v-S-bck', self.attrs.styleGroup)

#___________________________________________________________________________________________________ addError
    def addError(self, value):
        self._errors.append(value)

#___________________________________________________________________________________________________ makeRenderAttributes
    def makeRenderAttributes(self):
        # Don't allow the tag's _renderImpl to be called multiple times
        if self._attrsReady:
            return self._attrData

        try:
            self._attrData   = self._renderImpl()
        except Exception as err:
            MarkupTagError(
                tag=self,
                errorDef=MarkupTagError.RENDER_FAILURE
            ).log()
            self._log.writeError([
                'Tag Render failure',
                'TAG' + str(self)
            ], err)
            return None

        self._attrsReady = True
        return self._attrData
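A sketch of defining and instantiating an independent tag (the subclass and attribute source are illustrative, and the AttributeData/Logger dependencies must be importable): class-level constants such as TAG and VOID_TAG feed the getClassAttr() lookups used by the properties above.

class NoteTag(MarkupTag):
    TAG      = 'note'   # tag name used when no tagName argument is supplied
    VOID_TAG = True     # see isVoidTag: renders as an empty string

# Independent tags skip the processor/block machinery and parse only the
# attribute source string passed to the constructor.
tag = NoteTag(independent=True, attributeSource='style="margin-top:1em"')
print(tag.tagName)    # note
print(tag.isVoidTag)  # True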
Example #6
class SocketHandler(SocketServer.StreamRequestHandler):
    """A class for..."""

    #===================================================================================================
    #                                                                                       C L A S S

    SERVICE_UID = 'test'
    VERBOSE = False
    WORK_PATH = '/var/lib/'
    RUN_PATH = '/var/run/'
    LOG_PATH = '/var/log/'

    #___________________________________________________________________________________________________ __init__
    def __init__(self, request, client_address, server):
        self._log = Logger(self)
        self._log.write('Socket handler created')

        SocketServer.StreamRequestHandler.__init__(self, request,
                                                   client_address, server)

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: returnResponse

    @property
    def returnResponse(self):
        return getattr(self.__class__, 'RETURN_RESPONSE', False)

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ handle

    def handle(self):
        try:
            data = self.rfile.readline().strip()
            self._log.write('HANDLE: ' + str(data))
            try:
                result = self._respondImpl(JSON.fromString(unquote(data)))
            except Exception as err:
                self._log.writeError('RESPOND FAILURE', err)
                if self.returnResponse:
                    self.wfile.write(JSON.asString({'error': 1}))
                return

            if self.returnResponse:
                out = {'error': 0}
                if result:
                    out['payload'] = result
                self.wfile.write(out)
        except Exception as err:
            self._log.write('HANDLE FAILURE', err)

        return

#===================================================================================================
#                                                                               P R O T E C T E D

#___________________________________________________________________________________________________ _respondImpl

    def _respondImpl(self, data):
        pass
Example #7
class TrackCsvImporter(object):
    """ Imports track data from CSV formatted spreadsheets into the local Cadence database. """

#===============================================================================
#                                                                                       C L A S S

    # Used to break a trackway specifier into separate type and number entries
    _TRACKWAY_PATTERN = re.compile(r'(?P<type>[^0-9\s\t]+)[\s\t]*(?P<number>[^\(\s\t]+)')

    _UNDERPRINT_IGNORE_TRACKWAY_STR = ':UTW'
    _OVERPRINT_IGNORE_TRACKWAY_STR = ':OTW'

#_______________________________________________________________________________
    def __init__(self, path =None, logger =None):
        """Creates a new instance of TrackCsvImporter."""
        self._path = path

        self.created  = []
        self.modified = []

        self.fingerprints = dict()
        self.remainingTracks = dict()
        self._logger  = logger
        if not logger:
            self._logger = Logger(self, printOut=True)

#===============================================================================
#                                                                                     P U B L I C

#_______________________________________________________________________________
    def read(self, session, analysisSession, path =None, compressed =False):
        """ Reads from the spreadsheet located at the absolute path argument and adds each row
            to the tracks in the database. """

        if path is not None:
            self._path = path
        if self._path is None:
            return False

        model = Tracks_Track.MASTER
        for existingTrack in session.query(model).all():
            self.remainingTracks[existingTrack.uid] = existingTrack.fingerprint

        try:
            data = pd.read_csv(self._path)
        except Exception as err:
            self._writeError({
                'message':'ERROR: Unable to read CSV file "%s"' % self._path,
                'error':err })
            return

        if data is None:
            self._writeError({
                'message':'ERROR: Failed to create CSV reader for file "%s"' % self._path })
            return

        for index, row in data.iterrows():
            # Skip any rows that don't start with the proper numeric index value, which
            # includes the header row (if it exists) with the column names
            try:
                index = int(row[0])
            except Exception:
                continue

            rowDict = dict()
            for column in Reflection.getReflectionList(TrackCsvColumnEnum):
                value = row[column.index]

                if value and StringUtils.isStringType(value) and not StringUtils.isTextType(value):
                    # Try to decode the value into a unicode string using common codecs
                    for codec in ['utf8', 'MacRoman', 'utf16']:
                        try:
                            decodedValue = value.decode(codec)
                            if decodedValue:
                                value = decodedValue
                                break
                        except Exception:
                            continue

                try:
                    # Check to see if the value is NaN, and if it is replace it with an empty
                    # string to be ignored during import
                    value = '' if np.isnan(value) else value
                except Exception:
                    pass

                if value != '' or value is None:
                    rowDict[column.name] = value

            self.fromSpreadsheetEntry(rowDict, session)

        for uid, fingerprint in DictUtils.iter(self.remainingTracks):
            # Iterate through the list of remaining tracks, which are tracks not found by the
            # importer. If the track is marked as custom (meaning it is not managed by the importer)
            # it is ignored. Otherwise, the track is deleted from the database as a track that no
            # longer exists.

            track = Tracks_Track.MASTER.getByUid(uid, session)
            if track.custom:
                continue

            Tracks_Track.removeTrack(track, analysisSession)
            self._logger.write('[REMOVED]: No longer exists "%s" (%s)' % (
                track.fingerprint, track.uid))

        session.flush()

        for track in self.created:
            self._logger.write('[CREATED]: "%s" (%s)' % (track.fingerprint, track.uid))

        return True

#_______________________________________________________________________________
    def fromSpreadsheetEntry(self, csvRowData, session):
        """ From the spreadsheet data dictionary representing raw track data, this method creates
            a track entry in the database. """

        #-------------------------------------------------------------------------------------------
        # MISSING
        #       Try to determine if the missing value has been set for this row data. If so and it
        #       has been marked missing, skip the track during import to prevent importing tracks
        #       with no data.
        try:
            missingValue = csvRowData[TrackCsvColumnEnum.MISSING.name].strip()
            if missingValue:
                return False
        except Exception:
            pass

        try:
            csvIndex = int(csvRowData[TrackCsvColumnEnum.INDEX.name])
        except Exception:
            self._writeError({
                'message':'Missing spreadsheet index',
                'data':csvRowData })
            return False

        model = Tracks_Track.MASTER
        t = model()
        t.importFlags = 0
        t.index = csvIndex

        #-------------------------------------------------------------------------------------------
        # SITE
        try:
            t.site = csvRowData.get(TrackCsvColumnEnum.TRACKSITE.name).strip().upper()
        except Exception:
            self._writeError({
                'message':'Missing track site',
                'data':csvRowData,
                'index':csvIndex })
            return False

        #-------------------------------------------------------------------------------------------
        # SECTOR
        try:
            t.sector = csvRowData.get(TrackCsvColumnEnum.SECTOR.name).strip().upper()
        except Exception:
            self._writeError({
                'message':'Missing sector',
                'data':csvRowData,
                'index':csvIndex })
            return False

        #-------------------------------------------------------------------------------------------
        # LEVEL
        try:
            t.level = csvRowData.get(TrackCsvColumnEnum.LEVEL.name)
        except Exception:
            self._writeError({
                'message':'Missing level',
                'data':csvRowData,
                'index':csvIndex })
            return False

        #-------------------------------------------------------------------------------------------
        # TRACKWAY
        #       Parse the trackway entry into type and number values. In the process illegal
        #       characters are removed to keep the format something that can be handled correctly
        #       within the database.
        try:
            test = csvRowData.get(TrackCsvColumnEnum.TRACKWAY.name).strip().upper()
        except Exception:
            self._writeError({
                'message':'Missing trackway',
                'data':csvRowData,
                'index':csvIndex })
            return False

        # If the trackway contains an ignore pattern then return without creating the track.
        # This is used for tracks in the record that are actually under-prints from a higher
        # level recorded in the spreadsheet only for catalog reference.
        testIndexes = [
            test.find(self._UNDERPRINT_IGNORE_TRACKWAY_STR),
            test.find(self._OVERPRINT_IGNORE_TRACKWAY_STR) ]

        testParensIndex = test.find('(')
        for testIndex in testIndexes:
            if testIndex != -1 and (testParensIndex == -1 or testParensIndex > testIndex):
                return False

        result = self._TRACKWAY_PATTERN.search(test)
        try:
            t.trackwayType   = result.groupdict()['type'].upper().strip()
            t.trackwayNumber = result.groupdict()['number'].upper().strip()
        except Exception:
            self._writeError({
                'message':'Invalid trackway value: %s' % test,
                'data':csvRowData,
                'result':result,
                'match':result.groupdict() if result else 'N/A',
                'index':csvIndex })
            return False

        #-------------------------------------------------------------------------------------------
        # NAME
        #       Parse the name value into left, pes, and number attributes
        try:
            t.name = csvRowData.get(TrackCsvColumnEnum.TRACK_NAME.name).strip()
        except Exception:
            self._writeError({
                'message':'Missing track name',
                'data':csvRowData,
                'index':csvIndex })
            return False

        #-------------------------------------------------------------------------------------------
        # YEAR
        try:
            year = csvRowData.get(TrackCsvColumnEnum.MEASURED_DATE.name)

            if not year:
                year = '2014'
            else:

                try:
                    y = StringUtils.toText(year).split(';')[-1].strip().replace(
                        '/', '_').replace(
                        ' ', '').replace(
                        '-', '_').split('_')[-1]
                    year = int(re.compile('[^0-9]+').sub('', y))
                except Exception:
                    year = 2014

                if year > 2999:
                    # When multiple year entries combine into a single large number
                    year = int(StringUtils.toUnicode(year)[-4:])
                elif year < 2000:
                    # When two digit years (e.g. 09) are used instead of four digit years
                    year += 2000

                year = StringUtils.toUnicode(year)

            t.year = year
        except Exception:
            self._writeError({
                'message':'Missing cast date',
                'data':csvRowData,
                'index':csvIndex })
            return False

        #-------------------------------------------------------------------------------------------
        # FIND EXISTING
        #       Use data set above to attempt to load the track database entry
        fingerprint = t.fingerprint

        for uid, fp in DictUtils.iter(self.remainingTracks):
            # Remove the fingerprint from the list of fingerprints found in the database, which at
            # the end will leave only those fingerprints that exist in the database but were not
            # touched by the importer. These values can be used to identify tracks that should
            # have been "touched" but were not.
            if fp == fingerprint:
                del self.remainingTracks[uid]
                break

        existing = t.findExistingTracks(session)
        if existing and not isinstance(existing, Tracks_Track):
            existing = existing[0]

        if fingerprint in self.fingerprints:
            if not existing:
                existing = self.fingerprints[fingerprint]

            self._writeError({
                'message':'Ambiguous track entry "%s" [%s -> %s]' % (
                    fingerprint, csvIndex, existing.index),
                'data':csvRowData,
                'existing':existing,
                'index':csvIndex })
            return False

        self.fingerprints[fingerprint] = t

        if existing:
            t = existing
        else:
            session.add(t)
            session.flush()

        TCCE = TrackCsvColumnEnum
        IFE  = ImportFlagsEnum

        #-------------------------------------------------------------------------------------------
        # CSV PROPERTY CLEANUP
        #       Cleanup and format additional CSV values before saving the csv data to the track's
        #       snapshot.
        removeNonColumns = [
            TrackCsvColumnEnum.PRESERVED.name,
            TrackCsvColumnEnum.CAST.name,
            TrackCsvColumnEnum.OUTLINE_DRAWING.name]
        for columnName in removeNonColumns:
            if columnName in csvRowData:
                testValue = StringUtils.toText(csvRowData[columnName]).strip().upper()
                if testValue.startswith('NON'):
                    del csvRowData[columnName]

        # Create a snapshot that only includes a subset of properties that are flagged to be
        # included in the database snapshot entry
        snapshot = dict()
        for column in Reflection.getReflectionList(TrackCsvColumnEnum):
            # Include only values that are marked in the enumeration as to be included
            if not column.snapshot or column.name not in csvRowData:
                continue

            value = csvRowData.get(column.name)
            if value is None:
                continue
            elif not StringUtils.isStringType(value):
                value = StringUtils.toText(value)

            value = StringUtils.toText(value).strip()
            if value in ['-', b'\xd0'.decode('MacRoman')]:
                continue

            snapshot[column.name] = value

        #-------------------------------------------------------------------------------------------
        # WIDTH
        #       Parse the width into a numerical value and assign appropriate default uncertainty
        try:
            t.widthMeasured = 0.01*float(self._collapseManusPesProperty(
                t, csvRowData,
                TCCE.PES_WIDTH, TCCE.PES_WIDTH_GUESS,
                TCCE.MANUS_WIDTH, TCCE.MANUS_WIDTH_GUESS,
                '0', IFE.HIGH_WIDTH_UNCERTAINTY, IFE.NO_WIDTH ))

            t.widthMeasured = t.widthMeasured

            if not existing or t.widthUncertainty == 0:
                t.widthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_WIDTH_UNCERTAINTY) else 0.03

        except Exception as err:
            print(Logger().echoError('WIDTH PARSE ERROR:', err))
            self._writeError({
                'message':'Width parse error',
                'data':csvRowData,
                'error':err,
                'index':csvIndex })

            t.widthMeasured = 0.0
            if not existing:
                t.widthUncertainty = 0.05

        #-------------------------------------------------------------------------------------------
        # LENGTH
        #       Parse the length into a numerical value and assign appropriate default uncertainty
        try:
            t.lengthMeasured = 0.01*float(self._collapseManusPesProperty(
                t, csvRowData,
                TCCE.PES_LENGTH, TCCE.PES_LENGTH_GUESS,
                TCCE.MANUS_LENGTH, TCCE.MANUS_LENGTH_GUESS,
                '0', IFE.HIGH_LENGTH_UNCERTAINTY, IFE.NO_LENGTH ))

            t.lengthMeasured = t.lengthMeasured

            if not existing or t.lengthUncertainty == 0:
                t.lengthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_LENGTH_UNCERTAINTY) else 0.03

        except Exception as err:
            print(Logger().echoError('LENGTH PARSE ERROR:', err))
            self._writeError({
                'message':'Length parse error',
                'data':csvRowData,
                'error':err,
                'index':csvIndex })

            t.lengthMeasured = 0.0
            if not existing:
                t.lengthUncertainty = 0.05

        #-------------------------------------------------------------------------------------------
        # DEPTH
        #       Parse the depth into a numerical value and assign appropriate default uncertainty
        try:
            t.depthMeasured = 0.01*float(self._collapseManusPesProperty(
                t, csvRowData,
                TCCE.PES_DEPTH, TCCE.PES_DEPTH_GUESS,
                TCCE.MANUS_DEPTH, TCCE.MANUS_DEPTH_GUESS,
                '0', IFE.HIGH_DEPTH_UNCERTAINTY, 0 ))

            if not existing or t.depthUncertainty == 0:
                t.depthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_DEPTH_UNCERTAINTY) else 0.03

        except Exception as err:
            print(Logger().echoError('DEPTH PARSE ERROR:', err))
            t.depthMeasured = 0.0
            if not existing:
                t.depthUncertainty = 0.05

        #-------------------------------------------------------------------------------------------
        # ROTATION
        #       Parse the rotation into a numerical value and assign appropriate default uncertainty
        try:
            t.rotationMeasured = float(self._collapseLimbProperty(
                t, csvRowData,
                TCCE.LEFT_PES_ROTATION, TCCE.LEFT_PES_ROTATION_GUESS,
                TCCE.RIGHT_PES_ROTATION, TCCE.RIGHT_PES_ROTATION_GUESS,
                TCCE.LEFT_MANUS_ROTATION, TCCE.LEFT_MANUS_ROTATION_GUESS,
                TCCE.RIGHT_MANUS_ROTATION, TCCE.RIGHT_MANUS_ROTATION_GUESS,
                '0', IFE.HIGH_ROTATION_UNCERTAINTY, 0 ))

            if not existing or t.rotationUncertainty == 0:
                t.rotationUncertainty = \
                    10.0 if (t.importFlags & IFE.HIGH_ROTATION_UNCERTAINTY) else 45.0

        except Exception as err:
            print(Logger().echoError('ROTATION PARSE ERROR:', err))
            self._writeError({
                'message':'Rotation parse error',
                'error':err,
                'data':csvRowData,
                'index':csvIndex })

            t.rotationMeasured  = 0.0
            if not existing:
                t.rotationUncertainty = 45.0

        #-------------------------------------------------------------------------------------------
        # STRIDE
        try:
            strideLength = self._collapseManusPesProperty(
                t, csvRowData,
                TCCE.PES_STRIDE, TCCE.PES_STRIDE_GUESS,
                TCCE.MANUS_STRIDE, TCCE.MANUS_STRIDE_GUESS,
                None, IFE.HIGH_STRIDE_UNCERTAINTY )

            strideFactor = self._collapseManusPesProperty(
                t, csvRowData,
                TCCE.PES_STRIDE_FACTOR, None,
                TCCE.MANUS_STRIDE_FACTOR, None, 1.0)

            if strideLength:
                snapshot[SnapshotDataEnum.STRIDE_LENGTH] = 0.01*float(strideLength)*float(strideFactor)
        except Exception as err:
            print(Logger().echoError('STRIDE PARSE ERROR:', err))

        #-------------------------------------------------------------------------------------------
        # WIDTH ANGULATION PATTERN
        try:
            widthAngulation = self._collapseManusPesProperty(
                t, csvRowData,
                TCCE.WIDTH_PES_ANGULATION_PATTERN, TCCE.WIDTH_PES_ANGULATION_PATTERN_GUESS,
                TCCE.WIDTH_MANUS_ANGULATION_PATTERN, TCCE.WIDTH_MANUS_ANGULATION_PATTERN_GUESS,
                None, IFE.HIGH_WIDTH_ANGULATION_UNCERTAINTY )

            if widthAngulation:
                snapshot[SnapshotDataEnum.WIDTH_ANGULATION_PATTERN] = 0.01*float(widthAngulation)
        except Exception as err:
            print(Logger().echoError('WIDTH ANGULATION PARSE ERROR:', err))

        #-------------------------------------------------------------------------------------------
        # PACE
        try:
            pace = self._collapseLimbProperty(
                t, csvRowData,
                TCCE.LEFT_PES_PACE, TCCE.LEFT_PES_PACE_GUESS,
                TCCE.RIGHT_PES_PACE, TCCE.RIGHT_PES_PACE_GUESS,
                TCCE.LEFT_MANUS_PACE, TCCE.LEFT_MANUS_PACE_GUESS,
                TCCE.RIGHT_MANUS_PACE, TCCE.RIGHT_MANUS_PACE_GUESS,
                None, IFE.HIGH_PACE_UNCERTAINTY )

            if pace:
                snapshot[SnapshotDataEnum.PACE] = 0.01*float(pace)
        except Exception as err:
            print(Logger().echoError('PACE PARSE ERROR:', err))

        #-------------------------------------------------------------------------------------------
        # PACE ANGULATION PATTERN
        try:
            paceAngulation = self._collapseManusPesProperty(
                t, csvRowData,
                TCCE.PES_PACE_ANGULATION, TCCE.PES_PACE_ANGULATION_GUESS,
                TCCE.MANUS_PACE_ANGULATION, TCCE.MANUS_PACE_ANGULATION_GUESS,
                None, IFE.HIGH_WIDTH_ANGULATION_UNCERTAINTY )

            if paceAngulation:
                snapshot[SnapshotDataEnum.PACE_ANGULATION_PATTERN] = float(paceAngulation)
        except Exception as err:
            print(Logger().echoError('PACE ANGULATION PARSE ERROR:', err))

        #-------------------------------------------------------------------------------------------
        # PROGRESSION
        try:
            progression = self._collapseLimbProperty(
                t, csvRowData,
                TCCE.LEFT_PES_PROGRESSION, TCCE.LEFT_PES_PROGRESSION_GUESS,
                TCCE.RIGHT_PES_PROGRESSION, TCCE.RIGHT_PES_PROGRESSION_GUESS,
                TCCE.LEFT_MANUS_PROGRESSION, TCCE.LEFT_MANUS_PROGRESSION_GUESS,
                TCCE.RIGHT_MANUS_PROGRESSION, TCCE.RIGHT_MANUS_PROGRESSION_GUESS,
                None, IFE.HIGH_PROGRESSION_UNCERTAINTY )

            if progression:
                snapshot[SnapshotDataEnum.PROGRESSION] = 0.01*float(progression)
        except Exception as err:
            print(Logger().echoError('PROGRESSION PARSE ERROR:', err))

        #-------------------------------------------------------------------------------------------
        # GLENO-ACETABULAR DISTANCE
        try:
            gad = self._collapseGuessProperty(
                t, csvRowData,
                TCCE.GLENO_ACETABULAR_DISTANCE, TCCE.GLENO_ACETABULAR_DISTANCE_GUESS,
                None, IFE.HIGH_GLENO_ACETABULAR_UNCERTAINTY )

            if gad:
                snapshot[SnapshotDataEnum.GLENO_ACETABULAR_LENGTH] = 0.01*float(gad)
        except Exception as err:
            print(Logger().echoError('GLENO-ACETABULAR DISTANCE PARSE ERROR:', err))

        # Save the snapshot
        try:
            t.snapshot = JSON.asString(snapshot)
        except Exception:
            raise

        if TrackCsvColumnEnum.MEASURED_BY.name not in snapshot:
            # Mark entries that have no field measurements with a flag for future reference
            t.importFlags |= ImportFlagsEnum.NO_FIELD_MEASUREMENTS

        if existing:
            self.modified.append(t)
        else:
            self.created.append(t)

        return t

#_______________________________________________________________________________
    def _writeError(self, data):
        """ Writes import error data to the logger, formatting it for human readable display. """
        source = {}

        if 'data' in data:
            for n,v in DictUtils.iter(data['data']):
                source[' '.join(n.split('_')).title()] = v

        indexPrefix = ''
        if 'index' in data:
            indexPrefix = ' [INDEX: %s]:' % data.get('index', 'Unknown')

        result  = [
            'IMPORT ERROR%s: %s' % (indexPrefix, data['message']),
            'DATA: ' + DictUtils.prettyPrint(source)]

        if 'existing' in data:
            source = {}
            snapshot = data['existing'].snapshot
            if snapshot:
                snapshot = JSON.fromString(snapshot)
            if snapshot:
                for n,v in DictUtils.iter(snapshot):
                    source[' '.join(n.split('_')).title()] = v
            result.append('CONFLICT: ' + DictUtils.prettyPrint(source))

        if 'error' in data:
            self._logger.writeError(result, data['error'])
        else:
            self._logger.write(result)

#_______________________________________________________________________________
    @classmethod
    def _getStrippedValue(cls, value):
        try:
            return value.strip()
        except Exception:
            return value

#_______________________________________________________________________________
    @classmethod
    def _getStrippedRowData(cls, source, trackCsvEnum):
        out = source.get(trackCsvEnum.name)
        try:
            return out.strip()
        except Exception:
            return out

#_______________________________________________________________________________
    @classmethod
    def _collapseManusPesProperty(
            cls, track, csvRowData, pesEnum, pesGuessEnum, manusEnum, manusGuessEnum,
            defaultValue, guessFlag =0, missingFlag =0
    ):

        if track.pes:
            return cls._collapseGuessProperty(
                track=track,
                csvRowData=csvRowData,
                regularPropertyEnum=pesEnum,
                guessPropertyEnum=pesGuessEnum,
                defaultValue=defaultValue,
                guessFlag=guessFlag,
                missingFlag=missingFlag)
        else:
            return cls._collapseGuessProperty(
                track=track,
                csvRowData=csvRowData,
                regularPropertyEnum=manusEnum,
                guessPropertyEnum=manusGuessEnum,
                defaultValue=defaultValue,
                guessFlag=guessFlag,
                missingFlag=missingFlag)

#_______________________________________________________________________________
    @classmethod
    def _collapseLimbProperty(
            cls, track, csvRowData, lpEnum, lpGuessEnum, rpEnum, rpGuessEnum, lmEnum, lmGuessEnum,
            rmEnum, rmGuessEnum, defaultValue, guessFlag =0, missingFlag =0
    ):

        if track.pes and track.left:
            return cls._collapseGuessProperty(
                track, csvRowData, lpEnum, lpGuessEnum, defaultValue, guessFlag, missingFlag)
        elif track.pes and not track.left:
            return cls._collapseGuessProperty(
                track, csvRowData, rpEnum, rpGuessEnum, defaultValue, guessFlag, missingFlag)
        elif not track.pes and track.left:
            return cls._collapseGuessProperty(
                track, csvRowData, lmEnum, lmGuessEnum, defaultValue, guessFlag, missingFlag)
        elif not track.pes and not track.left:
            return cls._collapseGuessProperty(
                track, csvRowData, rmEnum, rmGuessEnum, defaultValue, guessFlag, missingFlag)
        else:
            return None

#_______________________________________________________________________________
    @classmethod
    def _collapseGuessProperty(
            cls, track, csvRowData, regularPropertyEnum, guessPropertyEnum, defaultValue,
            guessFlag =0, missingFlag =0
    ):
        value = cls._getStrippedRowData(csvRowData, regularPropertyEnum)
        if guessPropertyEnum:
            valueGuess = cls._getStrippedRowData(csvRowData, guessPropertyEnum)
        else:
            valueGuess = None

        if not value:
            if not valueGuess:
                track.importFlags |= (missingFlag & guessFlag)
                return defaultValue

            track.importFlags |= guessFlag
            return valueGuess

        return value

#===============================================================================
#                                                                               I N T R I N S I C

#_______________________________________________________________________________
    def __repr__(self):
        return self.__str__()

#_______________________________________________________________________________
    def __unicode__(self):
        return StringUtils.toUnicode(self.__str__())

#_______________________________________________________________________________
    def __str__(self):
        return '<%s>' % self.__class__.__name__
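As a self-contained illustration of the trackway parsing step above, the _TRACKWAY_PATTERN regular expression splits a specifier such as 'S 12' into its type and number groups (the sample value is illustrative):

import re

pattern = re.compile(r'(?P<type>[^0-9\s\t]+)[\s\t]*(?P<number>[^\(\s\t]+)')
match = pattern.search('S 12')
print(match.groupdict())  # {'type': 'S', 'number': '12'}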
Example #8
class IncludeCompressor(object):

    #===================================================================================================
    #                                                                                       C L A S S

    _REMOVE_COMMENT_RE = re.compile(r'/\*.+\*/', re.DOTALL)
    _REMOVE_COMMENT_LINE_RE = re.compile(r'(^|\n)[\s\t]*//.+(\n|$)')

    JS_TYPE = 'js'
    CSS_TYPE = 'css'

    #___________________________________________________________________________________________________ __init__
    def __init__(self, compileCoffee=False):
        self._log = Logger('IncludeCompressor')
        self._compileCoffee = compileCoffee

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ compress

    def compress(self, rootPath):
        if not self._fileExists(rootPath):
            return False
        elif os.path.isfile(rootPath):
            return self.compressFile(rootPath)
        else:
            return self.compressPath(rootPath)

#___________________________________________________________________________________________________ compressFile

    def compressFile(self, rootPath, directory=None):
        if not self._fileExists(rootPath):
            return False

        if self._compileCoffee:
            try:
                from pyaid.web.coffeescript.CoffeescriptBuilder import CoffeescriptBuilder
                CoffeescriptBuilder.compileAllOnPath(rootPath,
                                                     os.path.dirname(rootPath),
                                                     True)
                self._log.write('Coffeescript compiled.')
            except Exception as err:
                self._log.writeError('Failed to compile coffeescript file.',
                                     err)
                return False

        return self._compressFile(rootPath, directory)

#___________________________________________________________________________________________________ compressPath

    def compressPath(self, rootPath):
        # First compile any coffee scripts to js files
        if self._compileCoffee:
            try:
                from pyaid.web.coffeescript.CoffeescriptBuilder import CoffeescriptBuilder
                CoffeescriptBuilder.compileAllOnPath(rootPath, rootPath, True)
                self._log.write('Coffee scripts compiled.')
            except Exception as err:
                self._log.writeError('Failed to compile coffeescript files.',
                                     err)
                return False

        FileUtils.walkPath(rootPath, self._compressInFolder, None)
        self._log.write('Compression operation complete.')
        return True

#===================================================================================================
#                                                                               P R O T E C T E D

#___________________________________________________________________________________________________ _fileExists

    def _fileExists(self, rootPath):
        if not os.path.exists(rootPath):
            self._log.write('ERROR: [%s] does not exist. Operation aborted.' %
                            rootPath)
            return False

        return True

#___________________________________________________________________________________________________ _compressFile

    def _compressFile(self, target, directory):
        # Skip compiled files.
        if target.endswith('comp.js') or target.endswith('comp.css'):
            return False

        if target.endswith('.js'):
            fileType = IncludeCompressor.JS_TYPE
        elif target.endswith('.css'):
            fileType = IncludeCompressor.CSS_TYPE
        else:
            return False

        if not directory:
            directory = ''
        if not directory.endswith(os.sep) and not target.startswith(os.sep):
            directory += os.sep

        inFile = directory + target
        tempFile = directory + target + '.temp'

        try:
            fh = open(inFile, 'r')
            fileString = fh.read()
            fh.close()
        except Exception as err:
            self._log.writeError('FAILED: Unable to read ' + str(inFile), err)
            return False

        if fileType == IncludeCompressor.CSS_TYPE:
            fileString = fileString.replace('@charset "utf-8";', '')
            ofn = (target[0:-3] + 'comp.css')
        else:
            ofn = (target[0:-2] + 'comp.js')

        try:
            fh = open(tempFile, 'w')
            fh.write(fileString)
            fh.close()
        except Exception as err:
            self._log.writeError(
                'FAILED: Unable to write temp file ' + str(tempFile), err)
            return False

        outFile = directory + '/' + ofn

        cmd = ['minify', '"%s"' % tempFile, '"%s"' % outFile]
        result = SystemUtils.executeCommand(cmd)
        if result['code']:
            self._log.write('FAILED: Unable to compress ' + str(inFile))

        if os.path.exists(tempFile):
            os.remove(tempFile)

        if not os.path.exists(outFile):
            self._log.write('FAILED: ' + target + ' -> ' + ofn)
            return False
        elif fileType == IncludeCompressor.JS_TYPE:
            f = open(outFile, 'r')
            compressed = f.read()
            f.close()

            compressed = IncludeCompressor._REMOVE_COMMENT_RE.sub(
                '', compressed)
            compressed = IncludeCompressor._REMOVE_COMMENT_LINE_RE.sub(
                '', compressed)

            f = open(outFile, 'w')
            f.write(compressed.strip())
            f.close()

        inSize = SizeUnits.SizeConversion.bytesToKilobytes(inFile, 2)
        outSize = SizeUnits.SizeConversion.bytesToKilobytes(outFile, 2)
        saved = SizeUnits.SizeConversion.convertDelta(
            inSize, outSize, SizeUnits.SIZES.KILOBYTES, 2)

        self._log.write(
            'Compressed[%s]: %s -> %s [%sKB -> %sKB | Saved: %sKB]' %
            (fileType, target, ofn, inSize, outSize, saved))

        return True

#___________________________________________________________________________________________________ _compressInFolder

    def _compressInFolder(self, dumb, directory, names):
        if directory.find('.svn') != -1:
            return

        for fn in names:
            self._compressFile(fn, directory)
Example No. 9
0
def runPythonExec(script, kwargs=None):
    from nimble.NimbleEnvironment import NimbleEnvironment
    from nimble.data.NimbleResponseData import NimbleResponseData
    from nimble.data.enum.DataKindEnum import DataKindEnum

    try:
        nimble.cmds.undoInfo(openChunk=True)
    except Exception as err:
        return False

    try:
        # Create a new, temporary module in which to run the script
        module = imp.new_module('runExecTempModule')

        # Initialize the script with script inputs
        setattr(module, NimbleEnvironment.REMOTE_KWARGS_KEY,
                kwargs if kwargs is not None else dict())
        setattr(module, NimbleEnvironment.REMOTE_RESULT_KEY, dict())

        # Executes the script in the new module
        exec_(script, module.__dict__)

        # Find a NimbleScriptBase derived class definition and if it exists, run it to populate the
        # results
        for name, value in Reflection.getReflectionDict(module).iteritems():
            if not inspect.isclass(value):
                continue

            if NimbleScriptBase in value.__bases__:
                getattr(module, name)().run()
                break

        # Retrieve the results object that contains all results set by the execution of the script
        result = getattr(module, NimbleEnvironment.REMOTE_RESULT_KEY)
    except Exception as err:
        logger = Logger('runPythonExec', printOut=True)
        logger.writeError('ERROR: Failed Remote Script Execution', err)
        result = NimbleResponseData(
            kind=DataKindEnum.PYTHON_SCRIPT,
            response=NimbleResponseData.FAILED_RESPONSE,
            error=str(err))

    # If a result dictionary contains an error key format the response as a failure
    try:
        errorMessage = ArgsUtils.extract(
            NimbleEnvironment.REMOTE_RESULT_ERROR_KEY, None, result)
        if errorMessage:
            return NimbleResponseData(
                kind=DataKindEnum.PYTHON_SCRIPT,
                response=NimbleResponseData.FAILED_RESPONSE,
                error=errorMessage,
                payload=result)
    except Exception as err:
        pass

    try:
        nimble.cmds.undoInfo(closeChunk=True)
    except Exception as err:
        return False

    return result
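
A minimal usage sketch for the function above, assuming it is called inside a Maya session where the nimble package is importable; the script text and keyword values are purely illustrative.

# Hypothetical caller: the script string is executed in a temporary module and
# receives the kwargs dictionary under NimbleEnvironment.REMOTE_KWARGS_KEY.
script = "print('hello from the remotely executed script')"
response = runPythonExec(script, kwargs={'radius': 2.5})

# On success the return value is the result dictionary populated by the script;
# on failure it is a NimbleResponseData describing the error.
print(response)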
Example No. 10
0
class CoffeescriptBuilder(object):
    """A class for..."""

    CLASS_PATTERN = "^[\s\t]*class[\s\t]+(?P<class>[^\s\t\r\n]+)[\s\t]*"
    MISSING_CLASS_PATTERN = "[\s\t\(\[\{\!]+(?=[A-Z])(?P<class>[A-Za-z0-9_]+)(?P<next>[^A-Za-z0-9_]+)"

    _WARN_ID_MISSING_IMPORT = "MISSING-IMPORT"

    _GLOBAL_CLASSES = [
        "SFLOW",
        "PAGE",
        "FB",
        "Math",
        "JSON",
        "String",
        "ActiveXObject",
        "Date",
        "DOMParser",
        "RegExp",
        "Object",
        "Number",
        "Array",
        "Function",
        "XMLHttpRequest",
    ]

    _results = None
    _missing = None

    # ===================================================================================================
    #                                                                                       C L A S S

    # ___________________________________________________________________________________________________ __init__
    def __init__(
        self,
        targetPackageOrPath,
        rootPath,
        verbose=True,
        debug=False,
        trace=False,
        force=False,
        compress=False,
        buildOnly=False,
    ):
        """Creates a new instance of CoffeescriptBuilder."""

        self.buildOnly = buildOnly

        self._imports = dict()
        self._requires = dict()
        self._includes = dict()
        self._report = dict()
        self._warnings = []
        self._dependencyReport = dict()
        self._verbose = verbose
        self._log = Logger(self, printOut=True)
        self._trace = trace
        self._debug = debug
        self._targets = []
        self._force = force
        self._compress = compress
        self._rootPath = rootPath

        if not isinstance(targetPackageOrPath, CoffeescriptDependency):
            target = CoffeescriptDependency(targetPackageOrPath, rootPath, None)
        else:
            target = targetPackageOrPath

        if target.exists:
            self._targets.append(target)
        else:
            csFiles = CoffeescriptBuilder.getScriptsInPath(target.packagePath)

            # Look for exec matches first
            for f in csFiles:
                testTarget = CoffeescriptDependency(f, rootPath, None)
                if testTarget.isExec:
                    self._targets.append(testTarget)

            # Look for lib matches second. Lib matches are tested as a second pass because
            # constructing all exec files first potentially optimizes the import process for
            # the libraries.
            for f in csFiles:
                testTarget = CoffeescriptDependency(f, rootPath, None)
                if testTarget.isLib:
                    self._targets.append(testTarget)

        if len(self._targets) == 0:
            print("\n\n")
            self._log.write("No targets exist for: %s. Compilation aborted." % targetPackageOrPath)
            print("\n")

    # ===================================================================================================
    #                                                                                   G E T / S E T

    # ___________________________________________________________________________________________________ GS: report
    @property
    def report(self):
        return self._report

    # ___________________________________________________________________________________________________ GS: warnings
    @property
    def warnings(self):
        return self._warnings

    # ___________________________________________________________________________________________________ GS: imports
    @property
    def imports(self):
        return self._imports

    # ___________________________________________________________________________________________________ GS: requires
    @property
    def requires(self):
        return self._requires

    # ___________________________________________________________________________________________________ GS: includes
    @property
    def includes(self):
        return self._includes

    # ===================================================================================================
    #                                                                                     P U B L I C

    # ___________________________________________________________________________________________________ construct
    def construct(self):
        """Doc..."""
        for t in self._targets:
            self._report[t.package] = -1
            if t.isLib:
                self._constructLibrary(t)
            else:
                self._constructTarget(t)

            if self._compress:
                print("COMPRESSING:", t.package)
                from pyaid.web.coffeescript.IncludeCompressor import IncludeCompressor

                ic = IncludeCompressor()
                if not ic.compressFile(t.compiledPath):
                    print("COMPRESSION FAILURE:", t.compiledPath)

        return self._targets

    # ___________________________________________________________________________________________________ compileAllOnPath
    @staticmethod
    def compileAllOnPath(path, rootPath=None, recursive=False, debug=False, trace=False, force=False, compress=False):

        CoffeescriptBuilder._results = ""
        CoffeescriptBuilder._missing = {}
        if recursive:
            print("RECURSIVE COMPILE AT: " + path)

            def walker(paths, dirName, names):
                out = CoffeescriptBuilder._compileAllInDirectory(
                    os.path.join(paths[0], dirName), paths[1], debug=debug, trace=trace, force=force, compress=compress
                )
                CoffeescriptBuilder._results += out["res"]
                for n, v in DictUtils.iter(out["missing"]):
                    if n in CoffeescriptBuilder._missing:
                        continue
                    CoffeescriptBuilder._missing[n] = v

            FileUtils.walkPath(path, walker, [path, rootPath])
            print("\n\nCOMPILATION RESULTS:" + CoffeescriptBuilder._results)

            if CoffeescriptBuilder._missing:
                print("\n\nMISSING IMPORTS:" + "\n\n")
                for n, v in DictUtils.iter(CoffeescriptBuilder._missing):
                    print(v["class"] + " [LINE: #" + str(v["line"]) + " | " + v["package"] + "]")
        else:
            print("COMPILING DIRECTORY: " + path)
            CoffeescriptBuilder._compileAllInDirectory(
                path, rootPath, debug=debug, trace=trace, force=force, compress=compress
            )

    # ___________________________________________________________________________________________________ getScriptsInPath
    @staticmethod
    def getScriptsInPath(path):
        files = []

        for f in os.listdir(path):
            if f.lower().endswith("." + CoffeescriptDependency.EXTENSION):
                files.append(os.path.join(path, f))

        return files

    # ===================================================================================================
    #                                                                               P R O T E C T E D

    # ___________________________________________________________________________________________________ _constructLibrary
    def _constructLibrary(self, target):
        try:
            if self._verbose:
                print("\n\n" + ("-" * 100) + "\n")
                self._log.add("LIBRARY: %s\n\tsource: %s\n\troot: %s" % (target.package, target.path, target.rootPath))

            # ---------------------------------------------------------------------------------------
            # Compile all includes using library data
            targets, imports, modules, includes = self._getLibraryData(target)

            # Process requires for all of the targets
            for t in targets + imports + modules:
                self._processRequires(t)

            # ---------------------------------------------------------------------------------------
            # IMPORTS

            # Build the list of import excludes, skipping any exec or lib files that are listed
            # in the import statements.
            importExcludes = []
            for t in targets:
                for imp in self._imports[t.package]:
                    if not (imp.isExec or imp.isLib or imp.isInList(importExcludes)):
                        importExcludes.append(imp)

            # Compile all imports needed for the library. Any excludes are added to the shared
            # library to be made accessible via the VIZME registry.
            libImports = []
            sharedImports = []
            for t in imports + modules:
                for imp in self.imports[t.package]:
                    if not imp.isInList(libImports):
                        if imp.isInList(importExcludes):
                            if not imp.isInList(sharedImports):
                                sharedImports.append(imp)
                        else:
                            libImports.append(imp)
            libImports.append(target)

            # ---------------------------------------------------------------------------------------
            # INCLUDES

            # Compile all includes to exclude from the library because they already exist in a
            # target.
            includeExcludes = []
            for t in targets:
                for inc in self._includes[t.package]:
                    if not inc.isInList(includeExcludes):
                        includeExcludes.append(inc)

            # Compile all includes needed for the library.
            libIncludes = []
            sharedIncludes = []

            # Add the top-level includes directly because they are not handled implicitly like
            # the import case
            for inc in includes:
                if inc.isInList(includeExcludes):
                    sharedIncludes.append(inc)
                else:
                    libIncludes.append(inc)

            for t in imports + modules:
                for inc in self.includes[t.package]:
                    if not inc.isInList(libIncludes):
                        if inc.isInList(includeExcludes):
                            if not inc.isInList(sharedIncludes):
                                sharedIncludes.append(inc)
                        else:
                            libIncludes.append(inc)

            if self._verbose:
                print("\n")
                s = "IMPORTING:"
                for imp in libImports:
                    s += "\n\t" + imp.package
                for inc in libIncludes:
                    s += "\n\tEXTERNAL: " + inc.package
                self._log.add(s)

                print("\n")
                s = "EXCLUDING:"
                for imp in sharedImports:
                    s += "\n\t" + imp.package
                for inc in sharedIncludes:
                    s += "\n\tEXTERNAL: " + inc.package
                self._log.add(s)

            # ---------------------------------------------------------------------------------------
            # Construct intermediate compilation file.
            assembledFile = self._assembleFile(target, libImports, sharedImports, {"modules": modules})
            if assembledFile is None:
                self._log.write("ERROR: File assembly failed.")
                return

            # ---------------------------------------------------------------------------------------
            # Compile to Javascript
            if not self.buildOnly:
                self._compileToJavascript(target, assembledFile, libIncludes)

            if self._verbose:
                print("\n" + ("-" * 100) + "\n")

        except Exception as err:
            print("\n\n\n")
            self._log.writeError(
                "ERROR: Compilation failure for: %s\n\tsource: %s\n\troot: %s"
                % (target.package, target.path, target.rootPath),
                err,
            )

    # ___________________________________________________________________________________________________ _constructTarget
    def _constructTarget(self, target):
        try:
            if self._verbose:
                print("\n\n" + ("-" * 100) + "\n")
                self._log.write(
                    "EXECUTABLE: %s\n\tsource: %s\n\troot: %s" % (target.package, target.path, target.rootPath)
                )

            # ---------------------------------------------------------------------------------------
            # Handle imports and requires
            self._parseIncludes(target)
            self._processRequires(target)

            if self._verbose:
                s = "IMPORTING:"
                for imp in self._imports[target.package]:
                    s += "\n\t" + imp.package
                self._log.write(s)

            # ---------------------------------------------------------------------------------------
            # Construct intermediate compilation file.
            assembledFile = self._assembleFile(target)
            if assembledFile is None:
                self._log.write("ERROR: File assembly failed.")
                return

            # ---------------------------------------------------------------------------------------
            # Compile to Javascript
            if not self.buildOnly:
                self._compileToJavascript(target, assembledFile)

            if self._verbose:
                print("\n" + ("-" * 100) + "\n")

        except Exception as err:
            print("\n\n\n")
            self._log.writeError(
                "ERROR: Compilation failure for: %s\n\tsource: %s\n\troot: %s"
                % (target.package, target.path, target.rootPath),
                err,
            )

    # ___________________________________________________________________________________________________ _createOutputFile
    def _createOutputFile(self, target):
        """Creates the output ccs assembly file for writing."""
        outFile = target.assembledPath
        try:
            return open(outFile, "w")
        except Exception as err:
            print("\n\n")
            self._log.write(
                "Unable To Open output file: " + str(outFile) + "\n"
                "Check to make sure you have write permissions to that directory."
            )
            return None

    # ___________________________________________________________________________________________________ _writeRegistryEntry
    def _writeRegistryEntry(self, out, cacheOut, entry):
        # If there is an unconsumed registryEntry write it.
        if not entry:
            return None

        s = "\n" + entry + "\n"
        out.write(s)

        if cacheOut:
            cacheOut.write(s)
        return None

    # ___________________________________________________________________________________________________ _assembleFile
    def _assembleFile(self, target, importOverride=None, replacements=None, assembleData=None):

        # -------------------------------------------------------------------------------------------
        # CREATE FILE
        # Creates the file to write
        out = self._createOutputFile(target)
        if not out:
            self._log("ERROR: Unable to create output file")
            return

        # -------------------------------------------------------------------------------------------
        # DEFINE IMPORTS
        # Specify the files to import. For exec files the default packages are included, for
        # libraries these are overridden based on library target dependencies.
        targetImports = self._imports[target.package] if importOverride is None else importOverride

        replacements = replacements if isinstance(replacements, list) else []
        classList = []

        # -------------------------------------------------------------------------------------------
        # Note the last dependency so that the glue script can be appended prior
        lastDep = targetImports[-1]

        # -------------------------------------------------------------------------------------------
        # DEPENDENCY ASSEMBLY LOOP
        print("\n")
        for dep in targetImports:
            dep.open()

            if self._force or not dep.useCache:
                if not self._compileDependency(dep, out, replacements, targetImports, classList):
                    return None
                continue

            self._log.write("\tFROM CACHE: " + dep.package)
            out.write(dep.cacheSource)
            dep.close()

        out.close()

        if self._verbose:
            print("\n")
            self._log.add("CONSTRUCTED: " + out.name)

        return out.name

    # ___________________________________________________________________________________________________ _compileDependency
    def _compileDependency(self, dep, out, replacements, targetImports, classList):
        classPattern = re.compile(CoffeescriptBuilder.CLASS_PATTERN)
        missingPattern = re.compile(CoffeescriptBuilder.MISSING_CLASS_PATTERN)

        # -------------------------------------------------------------------------------------------
        # MISSING DEPENDENCIES
        # Handle missing dependencies
        if not os.path.exists(dep.path):
            print("\n\n")
            self._log.write("ERROR: " + dep.package + " package does not exist at: " + dep.path)
            return False

        lastWhitespace = ""
        openParens = 0
        openBrackets = 0
        openBraces = 0
        skipNextLine = False
        methodName = ""
        className = ""
        registryEntry = None

        raw = dep.source
        dep.close()

        s = "\n\n\t#" + ("%" * 100) + "\n\t#" + ("%" * 100) + "\n#\t\t" + dep.package + "\n"

        out.write(s)
        if dep.allowCaching:
            cacheOut = open(dep.cachePath, "w")
            cacheOut.write(s)
        else:
            try:
                if os.path.exists(dep.cachePath):
                    os.remove(dep.cachePath)
            except Exception as err:
                pass

            cacheOut = None

        self._log.write("\tCOMPILING: " + dep.package)

        analyzer = CoffeescriptAnalyzer(raw, debug=self._debug)
        analyzer.analyze()

        # ---------------------------------------------------------------------------------------
        # COMPILE
        # Line by line compile to ccs output file
        for l in analyzer:

            # -----------------------------------------------------------------------------------
            # RETARGET CLASS ACCESSORS TO VIZME registry
            # All class references (except references internal to the class itself) are retargeted
            # to the VIZME registry entry (REGISTRY.ClassName) to prevent class conflicts.
            for rep in replacements + targetImports:
                if rep != dep:
                    offset = 0
                    res = rep.searchPattern.finditer(l.redacted)
                    for r in res:
                        start = r.start() + offset
                        end = r.end() + offset

                        if self._trace:
                            self._log.write("RETARGET: " + l.source[start:end] + " | " + str(r.groupdict()))

                        # Make the replacement and adjust offsets for additional replacements
                        l.insert(start, end, rep.registryName)
                        offset += len(rep.registryName) - end + start

            # -----------------------------------------------------------------------------------
            # IDENTIFY CLASS DEFINITIONS
            # Find class definitions so they can be added to the VIZME registry.
            res = classPattern.search(l.redacted)
            if res:
                registryEntry = self._writeRegistryEntry(out, cacheOut, registryEntry)
                className = res.group("class").strip()
                registryEntry = "\n%s.%s ?= %s" % (CoffeescriptDependency.REGISTRY, className, className)
                classList.append(className)

            # -----------------------------------------------------------------------------------
            # CHECK FOR MISSING CLASSES
            # Search and find any missing class imports. If a possible missing import is found
            # flag it in the response.
            res = missingPattern.finditer(l.redacted)
            if res:
                for r in res:
                    cn = r.group("class").strip()
                    start = r.start()

                    if cn == className:
                        continue

                    # Ignore anything in all CAPS!
                    if cn.upper() == cn:
                        continue

                    # Ignore globally defined objects and classes
                    if cn in CoffeescriptBuilder._GLOBAL_CLASSES + analyzer.globalObjects:
                        continue

                    self._warnings.append(
                        {
                            "id": CoffeescriptBuilder._WARN_ID_MISSING_IMPORT,
                            "class": cn,
                            "line": l.lineNumber,
                            "package": dep.package,
                        }
                    )

                    print("\n")
                    self._log.write(
                        "WARNING: Possible missing import\n\tmissing: %s\n\tfrom: %s [line #%s]"
                        % (cn, dep.package, str(l.lineNumber))
                    )

            # -----------------------------------------------------------------------------------
            # LINE DEBUGGER ANALYSIS
            c = l.redacted.strip()
            skip = skipNextLine or not l.isSignificant
            skipNextLine = False

            if not skip:
                skips = ["class", "try", "catch", "else", "when", ".", "+", "-", "/", "=", "*", ",", "and", "or"]
                for s in skips:
                    if c.startswith(s):
                        skip = True
                        break

            if not skip:
                skips = ["->", "=>"]
                methodPattern = re.compile("^(?P<method>[^:]+)")

                for s in skips:
                    if c.endswith(s):
                        skip = True
                        res = methodPattern.search(c)
                        if res and res.group("method"):
                            methodName = res.group("method")
                        elif c.startswith("$"):
                            methodName = "$"

                        break

            # Check for line continuations
            if l.isSignificant:
                skips = [".", "+", "-", "/", "=", "*", ",", "and", "or"]
                for s in skips:
                    if c.endswith(s):
                        skipNextLine = True
                        break

            if self._trace:
                self._log.write(
                    c.replace("\n", "")
                    + (
                        "\n\t@@@@ skip: "
                        + str(skip)
                        + "\n\t@@@@ parens: "
                        + str(openParens)
                        + "\n\t@@@@ braces: "
                        + str(openBraces)
                        + "\n\t@@@@ brackets: "
                        + str(openBrackets)
                        + "\n\t@@@@ skipNext: "
                        + str(skipNextLine)
                    )
                )

            if self._debug and not skip and openParens == 0 and openBraces == 0 and openBrackets == 0:
                debugLine = "window.___vmiDebug('%s', '%s', '%s', %s)\n" % (
                    dep.package,
                    className,
                    methodName,
                    str(l.lineNumber),
                )

                indent = len(l.indent) > len(lastWhitespace)
                dedent = len(l.indent) < len(lastWhitespace)

                skips = [")", "]", "}"]
                skip = False
                for s in skips:
                    if c.startswith(s):
                        skip = True
                        break

                # Keep the previous indentation when dedenting onto a closing bracket.
                if not (dedent and skip):
                    lastWhitespace = l.indent

                codePattern = re.compile(r"(?P<code>[^\s\t\n]+)")
                res = codePattern.search(c)
                if not res or len(res.groupdict()["code"]) == 0:
                    if self._trace:
                        self._log.write('EMPTY: "' + c + '"')
                    debugLine = ""

                l.insert(0, 0, l.indent + debugLine)

            if l.isSignificant:
                openParens += l.redacted.count("(") - l.redacted.count(")")
                openBrackets += l.redacted.count("[") - l.redacted.count("]")
                openBraces += l.redacted.count("{") - l.redacted.count("}")

            # ---------------------------------------------------------------------------------------
            # WRITE MODIFIED OUTPUT
            out.write(l.source)

            if cacheOut:
                cacheOut.write(l.source)

        self._writeRegistryEntry(out, cacheOut, registryEntry)

        if cacheOut:
            cacheOut.close()

        return True

    # ___________________________________________________________________________________________________ _compileToJavascript
    def _compileToJavascript(self, target, assembledFile, jsIncludeOverrides=None):

        # Use the Coffeescript compiler to create a JS compilation of the assembled CS file
        result = SystemUtils.executeCommand(["coffee", "-c", "--bare", assembledFile])
        status = result["code"]
        output = result["out"]
        errors = 0
        forceVerbose = False

        # -------------------------------------------------------------------------------------------
        # ERROR HANDLING
        #    Check the error status of the compilation process and if a failure occurred parse the
        #    error results for display and logging.
        if status:
            outputLines = str(output).replace("\r", "").split("\n")
            for line in outputLines:
                if line.startswith("Error:") or line.startswith("SyntaxError:"):
                    errors += 1
                    result = CoffeescriptBuilder._parseError(line)
                    if result:
                        self._log.add(result)
                    else:
                        forceVerbose = True

        if forceVerbose:
            self._log.add(output)

        self._report[target.package] = errors
        if self._verbose:
            print("\n\n")
            if errors == 0 and status == 0:
                self._log.write("Compilation complete: " + target.compiledPath)
            else:
                self._log.write("Compilation FAILED: " + target.package)

        f = open(target.compiledPath, "r")
        res = f.read()
        f.close()

    # ___________________________________________________________________________________________________ _parseIncludes
    def _parseIncludes(self, target, rootTarget=None):
        """Doc..."""
        if rootTarget is None:
            rootTarget = target

        if rootTarget.package not in self._imports:
            self._imports[rootTarget.package] = []

        if rootTarget.package not in self._requires:
            self._requires[rootTarget.package] = []

        if rootTarget.package not in self._includes:
            self._includes[rootTarget.package] = []

        if not os.path.exists(target.path):
            print("\n")
            self._log.add("WARNING: Missing import.\n\tPACKAGE: " + target.package + "\n\tFILE: " + target.path)
            print("\n")
            return

        f = open(target.path)
        for line in f:

            # import parse
            dependency = CoffeescriptDependency.createImport(line, self._rootPath)
            if dependency and not dependency.isInList(self._imports[rootTarget.package]):
                self._parseIncludes(dependency, rootTarget)
                self._imports[rootTarget.package].append(dependency)
                continue

            # require parse
            dependency = CoffeescriptDependency.createRequire(line, self._rootPath)
            if dependency and not dependency.isInList(self._imports[rootTarget.package]):
                self._requires[rootTarget.package].append(dependency)
                continue

            # include parse
            dependency = CoffeescriptDependency.createInclude(line, self._rootPath)
            if dependency and not dependency.isInList(self._includes[rootTarget.package]):
                self._includes[rootTarget.package].append(dependency)
                continue

        f.close()
        self._imports[rootTarget.package].append(target)

    # ___________________________________________________________________________________________________ _processRequires
    def _processRequires(self, target):
        currentTarget = self._imports[target.package].pop()
        while len(self._requires[target.package]) > 0:
            self._parseIncludes(self._requires[target.package].pop(0), target)

        outlist = []
        for item in self._imports[target.package]:
            if not item.isInList(outlist) and not item.compare(currentTarget):
                outlist.append(item)
        self._imports[target.package] = outlist
        self._imports[target.package].append(currentTarget)

    # ___________________________________________________________________________________________________ _getLibraryData
    def _getLibraryData(self, target):
        targets = []
        modules = []
        imports = []
        includes = []

        src = open(target.path, "r")
        for line in src:

            # target parse
            d = CoffeescriptDependency.create(line, self._rootPath)
            if not d:
                continue

            if d.dependencyType == CoffeescriptDependency.TARGET_TYPE:
                targets.append(d)
            elif d.dependencyType == CoffeescriptDependency.IMPORT_TYPE:
                imports.append(d)
            elif d.dependencyType == CoffeescriptDependency.REQUIRE_TYPE:
                imports.append(d)
            elif d.dependencyType == CoffeescriptDependency.INCLUDE_TYPE:
                includes.append(d)
            elif d.dependencyType == CoffeescriptDependency.MODULE_TYPE:
                modules.append(d)
            else:
                continue

            self._parseIncludes(d)

        src.close()

        return targets, imports, modules, includes

    # ___________________________________________________________________________________________________ _compileAllInDirectory
    @staticmethod
    def _compileAllInDirectory(path, rootPath=None, debug=False, trace=False, force=False, compress=False):
        results = ""
        missing = {}
        count = 0
        for f in CoffeescriptBuilder.getScriptsInPath(path):
            target = CoffeescriptDependency(f, rootPath)
            if not (target.exists and (target.isExec or target.isLib)):
                continue

            c = CoffeescriptBuilder(target, rootPath, debug=debug, trace=trace, force=force, compress=compress)
            c.construct()
            count += 1
            for n, v in DictUtils.iter(c.report):
                num = max(0, 60 - len(n))
                results += "\n" + n + ":" + ("." * num)
                if v == 0:
                    results += "SUCCESS"
                elif v > 0:
                    results += "COMPILATION FAILED"
                else:
                    results += "ASSEMBLY FAILED"

            if len(c.warnings) > 0:
                results += "[" + str(len(c.warnings)) + " WARNINGS]"
                for v in c.warnings:
                    if not v["id"] == CoffeescriptBuilder._WARN_ID_MISSING_IMPORT:
                        continue

                    key = v["package"] + "-" + v["class"] + "-" + str(v["line"])
                    if key in missing:
                        continue

                    missing[key] = v

        if len(results) > 0:
            print("\nDIRECTORY " + path + " COMPILE RESULTS [" + str(count) + "]:" + results)
        return {"res": results, "missing": missing}

    # ___________________________________________________________________________________________________ _parseError
    @staticmethod
    def _parseError(error):
        """ Parses errors of the format:
        "Error: In /vizme2/website/js/vmi/blog/author/exec.ccs, Parse error on line 181: Unexpected 'INDENT'"
        """

        ccsFile = None
        sep = None

        prefixReplacements = ["SyntaxError: In ", "Error: In "]
        for p in prefixReplacements:
            error = error.replace(p, "")

        out = "\n-----------------------------------------------\nERROR: "
        try:
            sep = error.index(",")
            ccsFile = error[:sep]
        except Exception:
            pass

        try:
            sep2 = error.index(":")
            out += error[sep2 + 1 :].strip() + "\n"
        except Exception:
            if error and sep is not None:
                out += error[sep + 1 :].strip() + "\n"

        pattern = re.compile("line[\s\t]+(?P<linenumber>[0-9]+)")
        res = pattern.search(error)
        if res and len(res.groups()) > 0:
            lineNumber = int(res.groups()[0]) - 1
        else:
            out += "    Unspecified location"
            return

        if ccsFile:
            padSize = len(str(lineNumber + 3))
            jQueryName = "Exec Function (JQUERY Document ready)"
            functionName = None
            className = None
            trace = ""
            f = open(ccsFile, "r")
            for i, line in enumerate(f):
                if i > lineNumber + 4:
                    break

                if i <= lineNumber:
                    pattern = re.compile("^class[\s\t]+(?P<classname>[a-zA-Z0-9_]+)")
                    res = pattern.search(line)
                    if res and len(res.groups()) > 0:
                        className = res.groups()[0]
                        functionName = None

                    pattern = re.compile("^\$[\s\t]*[-=]+>")
                    res = pattern.search(line)
                    if res:
                        className = jQueryName
                        functionName = None

                    pattern = re.compile("[\s\t]*(?P<name>[a-zA-Z0-9_]+)[\s\t]*:[^-=>]*[-=]+>")
                    res = pattern.search(line)
                    if res and len(res.groups()) > 0:
                        functionName = res.groups()[0]

                if i > lineNumber - 4:
                    marker = ">>" if i == lineNumber else "  "
                    trace += marker + str(i).rjust(padSize) + "| " + line

            f.close()

            if functionName:
                out += "  " + ("METHOD" if className else "FUNCTION") + ": " + functionName + "\n"

            if className:
                out += "  " + ("CLASS" if className != jQueryName else "EXEC") + ": " + className + "\n"

            out += "  TRACE:\n" + trace

        return out + "\n"
Example No. 11
0
class TrackExporter(object):
    """A class for..."""

#===============================================================================
#                                                                                       C L A S S

    DELETED_IDENTIFIER = u'##DEL##'

#_______________________________________________________________________________
    def __init__(self, logger =None):
        """Creates a new instance of TrackExporter."""
        self.results = None
        self.logger = logger
        self.modifications = 0
        if not logger:
            self.logger = Logger(self, printOut=True)

#===============================================================================
#                                                                                     P U B L I C

#_______________________________________________________________________________
    def process(self, session, difference =True):
        """Doc..."""

        if self.results is not None:
            return True

        results = []
        model = Tracks_Track.MASTER

        if session is None:
            session = model.createSession()

        trackStores = session.query(model).all()
        index = 0
        indices = NumericUtils.linearSpace(0, len(trackStores), roundToIntegers=True)[1:]

        for trackStore in trackStores:
            track = trackStore.getMatchingTrack(session)
            if track is None:
                self.modifications += 1
                results.append({'uid':trackStore.uid, 'action':self.DELETED_IDENTIFIER})

                self.logger.write(
                    u'<div>DELETED: %s</div>' %  DictUtils.prettyPrint(
                        trackStore.toDict(uniqueOnly=True)))
            else:
                if difference:
                    diff = trackStore.toDiffDict(track.toDict())
                    if diff is not None:
                        self.modifications += 1
                        results.append(diff)

                        self.logger.write(
                            u'<div>MODIFIED: %s</div>' % trackStore.fingerprint)
                else:
                    results.append(track.toDict())

            index += 1
            if index in indices:
                self.logger.write(
                    u'<div style="color:#33CC33">%s%% Complete</div>' % StringUtils.toUnicode(
                        10*(indices.index(index) + 1)))

        self.logger.write(u'<div style="color:#33CC33">100% Complete</div>')

        self.results = results
        return True

#_______________________________________________________________________________
    def write(self, session, path, pretty =False, gzipped =True, difference =True):
        if self.results is None and not self.process(session, difference):
            return False

        try:
            JSON.toFile(path, self.results, pretty=pretty, gzipped=gzipped, throwError=True)
            return True
        except Exception as err:
            self.logger.writeError([
                u'ERROR: Unable to write export file',
                u'PATH: ' + StringUtils.toUnicode(path)], err)
            return False

#===============================================================================
#                                                                               I N T R I N S I C

#_______________________________________________________________________________
    def __repr__(self):
        return self.__str__()

#_______________________________________________________________________________
    def __unicode__(self):
        return StringUtils.toUnicode(self.__str__())

#_______________________________________________________________________________
    def __str__(self):
        return '<%s>' % self.__class__.__name__
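
A short usage sketch for the exporter above; the output path is hypothetical, and passing session=None lets process() create its own session from Tracks_Track.MASTER.

exporter = TrackExporter()
if exporter.write(session=None, path='/tmp/tracks-export.json', pretty=True, gzipped=False):
    print('Export complete with %s modifications' % exporter.modifications)
else:
    print('Export failed; see the log for details')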
Example No. 12
0
class DataFormatConverter(object):
    """A class for converting between various data interchange formats, e.g. XML and JSON."""

#===================================================================================================
#                                                                                       C L A S S

#___________________________________________________________________________________________________ __init__
    def __init__(self):
        """Creates a new instance of ClassTemplate."""
        self._type = None
        self._src  = None
        self._log  = Logger('DataFormatConverter')
        self._path = None

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: source
    @property
    def source(self):
        return self._src

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ load
    def load(self, path, fileType):
        if not os.path.exists(path):
            self._log.write('ERROR: Path does not exist [%s]. Unable to load.' % path)
            return False

        try:
            fh  = codecs.open(path, 'r', 'utf-8')
            res = fh.read()
            fh.close()
            enc = res.encode('utf-8')
            self.loads(enc, fileType)
        except Exception as err:
            self._log.writeError('Failed to load source file [%s].' % path, err)
            return False

        self._path = path
        return True

#___________________________________________________________________________________________________ loads
    def loads(self, srcString, srcType):
        if srcString is None:
            self._log.write('ERROR: Source string is empty or invalid.')
            return False

        srcString = StringUtils.toStr2(srcString)

        self._path = None
        self._src  = srcString
        self._type = srcType
        return True

#___________________________________________________________________________________________________ convertDirectory
    def convertDirectory(self, path, srcType, targetType, recursive =False):
        if srcType is None or targetType is None:
            self._log.write('ERROR: Source and/or target types are invalid. Operation aborted.')
            return False

        if not os.path.exists(path):
            self._log.write('ERROR: The specified path [%s] does not exist. Operation aborted.' \
                            % str(path))
            return False

        if recursive:
            FileUtils.walkPath(path, self._convertInDirectory, [srcType, targetType])
        else:
            self._convertInDirectory([srcType, targetType], path, os.listdir(path))

        return True

#___________________________________________________________________________________________________ writeToFile
    def writeToFile(self, targetType, path =None):
        if path is None and self._path is None:
            self._log.write('ERROR: Unable to write to file, no path was specified.')
            return False

        # Assign the reader based on source type
        reader = self._getParserFromType()
        if reader is None:
            self._log.write('ERROR: Unrecognized source type [%s]. Unable to convert.' % self._type)
            return False

        # Assign writer based on target type
        writer = self._getParserFromType(targetType)
        if writer is None:
            self._log.write('ERROR: Unrecognized conversion target type [%s]. Unable to convert.' \
                            % targetType)
            return False

        path = path if path else self._path
        d    = os.path.dirname(path)
        f    = os.path.basename(path).split('.')[0]
        f   += '.' + writer.TYPE_ID

        if not os.path.exists(d):
            os.makedirs(d)

        try:
            print(len(self._src))
            src = reader.parse(self._src, None, True)
        except Exception as err:
            self._log.writeError('ERROR: Failed to parse source. Conversion aborted.', err)
            return False

        try:
            res = writer.serialize(src)
        except Exception as err:
            self._log.writeError('ERROR: Failed to serialize data. Conversion aborted.', err)
            return False

        out = os.path.join(d, f)
        try:
            fh = codecs.open(out, 'wb', 'utf-8')
            fh.write(res)
            fh.close()
        except Exception as err:
            self._log.writeError('ERROR: Failed to write file [%s]. Conversion aborted.' \
                                 % str(out), err)
            return False

        self._log.write('Converted: [%s] => [%s].' % (self._path, out))
        return True

#___________________________________________________________________________________________________ getAsXML
    def getAsXML(self):
        """Doc..."""
        if self._type == XMLConfigParser.TYPE_ID:
            return self._src
        else:
            return self._convert(XMLConfigParser.TYPE_ID)

#___________________________________________________________________________________________________ getAsJSON
    def getAsJSON(self):
        """Doc..."""
        if self._type == JSONConfigParser.TYPE_ID:
            return self._src
        else:
            return self._convert(JSONConfigParser.TYPE_ID)

#___________________________________________________________________________________________________ getAsDictionary
    def getAsDictionary(self, asInterchangeFormat =False):
        reader = self._getParserFromType()
        return reader.parse(self._src, None, asInterchangeFormat)

#___________________________________________________________________________________________________ executeConversion
    @staticmethod
    def executeConversion(source =None, srcType =None, targetType =None, target =None, recursive =False):
        types = ['xml', 'json']

        if source is None:
            source = queryGeneralValue('Enter the source file (or path) to convert:')

        if srcType is None and os.path.isfile(source):
            fileType = source.split('.')[-1].lower()
            if fileType in types:
                srcType = fileType

        if srcType is None:
            srcType = queryFromList('Specify source file(s) type:', types)

        if targetType is None:
            targetType = queryFromList('Specify target file(s) type:', types)

        d = DataFormatConverter()
        if os.path.isfile(source):
            d.load(source, srcType)
            d.writeToFile(targetType, target)
        else:
            d.convertDirectory(source, srcType, targetType, recursive)

#===================================================================================================
#                                                                               P R O T E C T E D

#___________________________________________________________________________________________________ _convert
    def _convert(self, targetType):
        reader = self._getParserFromType()
        data   = reader.parse(self._src, None, True)

        if data is None:
            self._log.write('ERROR: Failed to parse input. Skipping conversion.')
            return None

        writer = self._getParserFromType(targetType)
        return writer.serialize(data)

#___________________________________________________________________________________________________ _getParserFromType
    def _getParserFromType(self, typeID =None):
        if typeID is None:
            typeID = self._type

        if typeID == XMLConfigParser.TYPE_ID:
            return XMLConfigParser
        elif typeID == JSONConfigParser.TYPE_ID:
            return JSONConfigParser
        else:
            self._log.write('ERROR: _getParserFromType() failed for type: ' + str(typeID))
            return None

#___________________________________________________________________________________________________ _convertInDirectory
    def _convertInDirectory(self, types, dirname, names):
        if dirname.find('.svn') != -1:
            return

        reader = self._getParserFromType(types[0])
        writer = self._getParserFromType(types[1])
        for n in names:
            if not n.endswith(reader.TYPE_ID):
                continue

            src = os.path.join(dirname, n)
            self.load(src, reader.TYPE_ID)
            self.writeToFile(writer.TYPE_ID)
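
A minimal usage sketch; the file path is hypothetical, and the 'xml'/'json' type identifiers follow the types list used by executeConversion() above.

# Convert a single XML file to JSON next to the original.
DataFormatConverter.executeConversion(
    source='/data/settings.xml',    # hypothetical source file
    srcType='xml',
    targetType='json')

# Or, driving an instance directly:
converter = DataFormatConverter()
if converter.load('/data/settings.xml', 'xml'):
    converter.writeToFile('json')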
Example No. 13
0
class IncludeCompressor(object):

#===================================================================================================
#                                                                                       C L A S S

    _REMOVE_COMMENT_RE      = re.compile(r'/\*.+\*/', re.DOTALL)
    _REMOVE_COMMENT_LINE_RE = re.compile(r'(^|\n)[\s\t]*//.+(\n|$)')

    JS_TYPE  = 'js'
    CSS_TYPE = 'css'

#___________________________________________________________________________________________________ __init__
    def __init__(self, compileCoffee =False):
        self._log           = Logger('IncludeCompressor')
        self._compileCoffee = compileCoffee

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ compress
    def compress(self, rootPath):
        if not self._fileExists(rootPath):
            return False
        elif os.path.isfile(rootPath):
            return self.compressFile(rootPath)
        else:
            return self.compressPath(rootPath)

#___________________________________________________________________________________________________ compressFile
    def compressFile(self, rootPath, directory =None):
        if not self._fileExists(rootPath):
            return False

        if self._compileCoffee:
            try:
                from pyaid.web.coffeescript.CoffeescriptBuilder import CoffeescriptBuilder
                CoffeescriptBuilder.compileAllOnPath(rootPath, os.path.dirname(rootPath), True)
                self._log.write('Coffeescript compiled.')
            except Exception as err:
                self._log.writeError('Failed to compile coffeescript file.', err)
                return False

        return self._compressFile(rootPath, directory)

#___________________________________________________________________________________________________ compressPath
    def compressPath(self, rootPath):
        # First compile any coffee scripts to js files
        if self._compileCoffee:
            try:
                from pyaid.web.coffeescript.CoffeescriptBuilder import CoffeescriptBuilder
                CoffeescriptBuilder.compileAllOnPath(rootPath, rootPath, True)
                self._log.write('Coffee scripts compiled.')
            except Exception as err:
                self._log.writeError('Failed to compile coffeescript files.', err)
                return False

        FileUtils.walkPath(rootPath, self._compressInFolder, None)
        self._log.write('Compression operation complete.')
        return True

#===================================================================================================
#                                                                               P R O T E C T E D

#___________________________________________________________________________________________________ _fileExists
    def _fileExists(self, rootPath):
        if not os.path.exists(rootPath):
            self._log.write('ERROR: [%s] does not exist. Operation aborted.' % rootPath)
            return False

        return True

#___________________________________________________________________________________________________ _compressFile
    def _compressFile(self, target, directory):
        # Skip compiled files.
        if target.endswith('comp.js') or target.endswith('comp.css'):
            return False

        if target.endswith('.js'):
            fileType = IncludeCompressor.JS_TYPE
        elif target.endswith('.css'):
            fileType = IncludeCompressor.CSS_TYPE
        else:
            return False

        if not directory:
            directory = ''
        if not directory.endswith(os.sep) and not target.startswith(os.sep):
            directory += os.sep

        inFile     = directory + target
        tempFile   = directory + target + '.temp'

        try:
            fh         = open(inFile, 'r')
            fileString = fh.read()
            fh.close()
        except Exception as err:
            self._log.writeError('FAILED: Unable to read ' + str(inFile), err)
            return False

        if fileType == IncludeCompressor.CSS_TYPE:
            fileString = fileString.replace('@charset "utf-8";', '')
            ofn        = (target[0:-3] + 'comp.css')
        else:
            ofn = (target[0:-2] + 'comp.js')

        try:
            fh = open(tempFile, 'w')
            fh.write(fileString)
            fh.close()
        except Exception as err:
            self._log.writeError('FAILED: Unable to write temp file ' + str(tempFile), err)
            return False

        outFile = directory + '/' + ofn

        cmd    = ['minify', '"%s"' % tempFile, '"%s"' % outFile]
        result = SystemUtils.executeCommand(cmd)
        if result['code']:
            self._log.write('FAILED: Unable to compress ' + str(inFile))

        if os.path.exists(tempFile):
            os.remove(tempFile)

        if not os.path.exists(outFile):
            self._log.write('FAILED: ' + target + ' -> ' + ofn)
            return False
        elif fileType == IncludeCompressor.JS_TYPE:
            f          = open(outFile, 'r')
            compressed = f.read()
            f.close()

            compressed = IncludeCompressor._REMOVE_COMMENT_RE.sub('', compressed)
            compressed = IncludeCompressor._REMOVE_COMMENT_LINE_RE.sub('', compressed)

            f = open(outFile, 'w')
            f.write(compressed.strip())
            f.close()

        inSize  = SizeUnits.SizeConversion.bytesToKilobytes(inFile, 2)
        outSize = SizeUnits.SizeConversion.bytesToKilobytes(outFile, 2)
        saved   = SizeUnits.SizeConversion.convertDelta(
            inSize, outSize, SizeUnits.SIZES.KILOBYTES, 2)

        self._log.write(
            'Compressed[%s]: %s -> %s [%sKB -> %sKB | Saved: %sKB]' % (
                fileType, target, ofn, inSize, outSize, saved))

        return True

#___________________________________________________________________________________________________ _compressInFolder
    def _compressInFolder(self, dumb, directory, names):
        if directory.find('.svn') != -1:
            return

        for fn in names:
            self._compressFile(fn, directory)
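
A short usage sketch; the directory is hypothetical, and compression shells out to a 'minify' command-line tool that must be available on the PATH (see _compressFile above).

compressor = IncludeCompressor(compileCoffee=False)

# compress() dispatches to compressFile() for a single file or compressPath() for a
# directory tree, producing *.comp.js / *.comp.css files alongside the sources.
compressor.compress('/var/www/static')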
Ejemplo n.º 14
0
# Check for PySide site package shared libraries
foundLocation = None
for p in sys.path:
    p = FileUtils.createPath(p, u'PySide', isDir=True)
    if not os.path.exists(p):
        continue
    if os.path.exists(FileUtils.createPath(p, u'QtCore.so', isFile=True)):
        foundLocation = p
        break

printResult(u'PySide (Package Libraries)', u'PASSED' if foundLocation else u'FAILED')
if foundLocation:
    print u'  * ', foundLocation

# Check for PySide
try:
    from PySide import QtCore
    printResult(u'PySide', u'PASSED')
except Exception, err:
    printResult(u'PySide', u'FAILED')
    logger.writeError(u'Unable to import PySide', err)

# Check for PyGlass
try:
    from pyglass.app.PyGlassEnvironment import PyGlassEnvironment
    printResult(u'PyGlass', u'PASSED')
except Exception, err:
    printResult(u'PyGlass', u'FAILED')
    logger.writeError(u'Unable to import PyGlass', err)
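The checks above rely on a printResult helper and a logger created earlier in the original
script, which are not shown here. A self-contained sketch of the same "attempt the import,
report PASSED or FAILED" pattern; the helper name is an illustrative assumption:

import importlib

def checkImport(label, moduleName):
    # Try to import the module and report a simple pass/fail result for the label.
    try:
        importlib.import_module(moduleName)
        print('%s: PASSED' % label)
        return True
    except Exception as err:
        print('%s: FAILED (%s)' % (label, err))
        return False

checkImport('PySide', 'PySide.QtCore')
checkImport('PyGlass', 'pyglass.app.PyGlassEnvironment')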
Ejemplo n.º 15
0
class CoffeescriptBuilder(object):
    """A class for..."""

    CLASS_PATTERN = r'^[\s\t]*class[\s\t]+(?P<class>[^\s\t\r\n]+)[\s\t]*'
    MISSING_CLASS_PATTERN = r'[\s\t\(\[\{\!]+(?=[A-Z])(?P<class>[A-Za-z0-9_]+)(?P<next>[^A-Za-z0-9_]+)'

    _WARN_ID_MISSING_IMPORT = 'MISSING-IMPORT'

    _GLOBAL_CLASSES = [
        'SFLOW', 'PAGE', 'FB', 'Math', 'JSON', 'String', 'ActiveXObject',
        'Date', 'DOMParser', 'RegExp', 'Object', 'Number', 'Array', 'Function',
        'XMLHttpRequest'
    ]

    _results = None
    _missing = None

    #===================================================================================================
    #                                                                                       C L A S S

    #___________________________________________________________________________________________________ __init__
    def __init__(self,
                 targetPackageOrPath,
                 rootPath,
                 verbose=True,
                 debug=False,
                 trace=False,
                 force=False,
                 compress=False,
                 buildOnly=False):
        """Creates a new instance of CoffeescriptBuilder."""

        self.buildOnly = buildOnly

        self._imports = dict()
        self._requires = dict()
        self._includes = dict()
        self._report = dict()
        self._warnings = []
        self._dependencyReport = dict()
        self._verbose = verbose
        self._log = Logger(self, printOut=True)
        self._trace = trace
        self._debug = debug
        self._targets = []
        self._force = force
        self._compress = compress
        self._rootPath = rootPath

        if not isinstance(targetPackageOrPath, CoffeescriptDependency):
            target = CoffeescriptDependency(targetPackageOrPath, rootPath,
                                            None)
        else:
            target = targetPackageOrPath

        if target.exists:
            self._targets.append(target)
        else:
            csFiles = CoffeescriptBuilder.getScriptsInPath(target.packagePath)

            # Look for exec matches first
            for f in csFiles:
                testTarget = CoffeescriptDependency(f, rootPath, None)
                if testTarget.isExec:
                    self._targets.append(testTarget)

            # Look for lib matches second. Lib matches are tested as a second pass because
            # constructing all exec files first potentially optimizes the import process for
            # the libraries.
            for f in csFiles:
                testTarget = CoffeescriptDependency(f, rootPath, None)
                if testTarget.isLib:
                    self._targets.append(testTarget)

        if len(self._targets) == 0:
            print('\n\n')
            self._log.write('No targets exist for: %s. Compilation aborted.' %
                            targetPackageOrPath)
            print('\n')

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: report

    @property
    def report(self):
        return self._report

#___________________________________________________________________________________________________ GS: warnings

    @property
    def warnings(self):
        return self._warnings

#___________________________________________________________________________________________________ GS: imports

    @property
    def imports(self):
        return self._imports

#___________________________________________________________________________________________________ GS: requires

    @property
    def requires(self):
        return self._requires

#___________________________________________________________________________________________________ GS: includes

    @property
    def includes(self):
        return self._includes

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ construct

    def construct(self):
        """Doc..."""
        for t in self._targets:
            self._report[t.package] = -1
            if t.isLib:
                self._constructLibrary(t)
            else:
                self._constructTarget(t)

            if self._compress:
                print('COMPRESSING:', t.package)
                from pyaid.web.coffeescript.IncludeCompressor import IncludeCompressor
                ic = IncludeCompressor()
                if not ic.compressFile(t.compiledPath):
                    print('COMPRESSION FAILURE:', t.compiledPath)

        return self._targets

#___________________________________________________________________________________________________ compileAllOnPath

    @staticmethod
    def compileAllOnPath(path,
                         rootPath=None,
                         recursive=False,
                         debug=False,
                         trace=False,
                         force=False,
                         compress=False):

        CoffeescriptBuilder._results = ''
        CoffeescriptBuilder._missing = {}
        if recursive:
            print('RECURSIVE COMPILE AT: ' + path)

            def walker(paths, dirName, names):
                out = CoffeescriptBuilder._compileAllInDirectory(
                    os.path.join(paths[0], dirName),
                    paths[1],
                    debug=debug,
                    trace=trace,
                    force=force,
                    compress=compress)
                CoffeescriptBuilder._results += out['res']
                for n, v in DictUtils.iter(out['missing']):
                    if n in CoffeescriptBuilder._missing:
                        continue
                    CoffeescriptBuilder._missing[n] = v

            FileUtils.walkPath(path, walker, [path, rootPath])
            print('\n\nCOMPILATION RESULTS:' + CoffeescriptBuilder._results)

            if CoffeescriptBuilder._missing:
                print('\n\nMISSING IMPORTS:' + '\n\n')
                for n, v in DictUtils.iter(CoffeescriptBuilder._missing):
                    print(v['class'] + ' [LINE: #' + str(v['line']) + ' | ' +
                          v['package'] + ']')
        else:
            print('COMPILING DIRECTORY: ' + path)
            CoffeescriptBuilder._compileAllInDirectory(path,
                                                       rootPath,
                                                       debug=debug,
                                                       trace=trace,
                                                       force=force,
                                                       compress=compress)

#___________________________________________________________________________________________________ getScriptsInPath

    @staticmethod
    def getScriptsInPath(path):
        files = []

        for f in os.listdir(path):
            if f.lower().endswith('.' + CoffeescriptDependency.EXTENSION):
                files.append(os.path.join(path, f))

        return files

#===================================================================================================
#                                                                               P R O T E C T E D

#___________________________________________________________________________________________________ _constructLibrary

    def _constructLibrary(self, target):
        try:
            if self._verbose:
                print("\n\n" + ('-' * 100) + '\n')
                self._log.add('LIBRARY: %s\n\tsource: %s\n\troot: %s' %
                              (target.package, target.path, target.rootPath))

            #---------------------------------------------------------------------------------------
            # Compile all includes using library data
            targets, imports, modules, includes = self._getLibraryData(target)

            # Process requires for all of the targets
            for t in (targets + imports + modules):
                self._processRequires(t)

            #---------------------------------------------------------------------------------------
            # IMPORTS

            # Compile all excludes skipping any exec or lib files that are listed in the import
            # statements.
            importExcludes = []
            for t in targets:
                for imp in self._imports[t.package]:
                    if not (imp.isExec or imp.isLib
                            or imp.isInList(importExcludes)):
                        importExcludes.append(imp)

            # Compile all imports needed for the library. Any excludes are added to the shared
            # library to be made accessible via the VIZME registry.
            libImports = []
            sharedImports = []
            for t in (imports + modules):
                for imp in self.imports[t.package]:
                    if not imp.isInList(libImports):
                        if imp.isInList(importExcludes):
                            if not imp.isInList(sharedImports):
                                sharedImports.append(imp)
                        else:
                            libImports.append(imp)
            libImports.append(target)

            #---------------------------------------------------------------------------------------
            # INCLUDES

            # Compile all includes to exclude from the library because they already exist in a
            # target.
            includeExcludes = []
            for t in targets:
                for inc in self._includes[t.package]:
                    if not inc.isInList(includeExcludes):
                        includeExcludes.append(inc)

            # Compile all includes needed for the library.
            libIncludes = []
            sharedIncludes = []

            # Add the top-level includes directly because they are not handled implicitly like
            # the import case
            for inc in includes:
                if inc.isInList(includeExcludes):
                    sharedIncludes.append(inc)
                else:
                    libIncludes.append(inc)

            for t in (imports + modules):
                for inc in self.includes[t.package]:
                    if not inc.isInList(libIncludes):
                        if inc.isInList(includeExcludes):
                            if not inc.isInList(sharedIncludes):
                                sharedIncludes.append(inc)
                        else:
                            libIncludes.append(inc)

            if self._verbose:
                print('\n')
                s = 'IMPORTING:'
                for imp in libImports:
                    s += '\n\t' + imp.package
                for inc in libIncludes:
                    s += '\n\tEXTERNAL: ' + inc.package
                self._log.add(s)

                print('\n')
                s = 'EXCLUDING:'
                for imp in sharedImports:
                    s += '\n\t' + imp.package
                for inc in sharedIncludes:
                    s += '\n\tEXTERNAL: ' + inc.package
                self._log.add(s)

            #---------------------------------------------------------------------------------------
            # Construct intermediate compilation file.
            assembledFile = self._assembleFile(target, libImports,
                                               sharedImports,
                                               {'modules': modules})
            if assembledFile is None:
                self._log.write('ERROR: File assembly failed.')
                return

            #---------------------------------------------------------------------------------------
            # Compile to Javascript
            if not self.buildOnly:
                self._compileToJavascript(target, assembledFile, libIncludes)

            if self._verbose:
                print("\n" + ('-' * 100) + '\n')

        except Exception as err:
            print("\n\n\n")
            self._log.writeError(
                'ERROR: Compilation failure for: %s\n\tsource: %s\n\troot: %s'
                % (target.package, target.path, target.rootPath), err)

#___________________________________________________________________________________________________ _constructTarget

    def _constructTarget(self, target):
        try:
            if self._verbose:
                print("\n\n" + ('-' * 100) + '\n')
                self._log.write('EXECUTABLE: %s\n\tsource: %s\n\troot: %s' %
                                (target.package, target.path, target.rootPath))

            #---------------------------------------------------------------------------------------
            # Handle imports and requires
            self._parseIncludes(target)
            self._processRequires(target)

            if self._verbose:
                s = 'IMPORTING:'
                for imp in self._imports[target.package]:
                    s += '\n\t' + imp.package
                self._log.write(s)

            #---------------------------------------------------------------------------------------
            # Construct intermediate compilation file.
            assembledFile = self._assembleFile(target)
            if assembledFile is None:
                self._log.write('ERROR: File assembly failed.')
                return

            #---------------------------------------------------------------------------------------
            # Compile to Javascript
            if not self.buildOnly:
                self._compileToJavascript(target, assembledFile)

            if self._verbose:
                print("\n" + ('-' * 100) + '\n')

        except Exception as err:
            print("\n\n\n")
            self._log.writeError(
                'ERROR: Compilation failure for: %s\n\tsource: %s\n\troot: %s'
                % (target.package, target.path, target.rootPath), err)

#___________________________________________________________________________________________________ _createOutputFile

    def _createOutputFile(self, target):
        """Creates the output ccs assembly file for writing."""
        outFile = target.assembledPath
        try:
            return open(outFile, 'w')
        except Exception as err:
            print("\n\n")
            self._log.writeError(
                'Unable to open output file: %s\n'
                'Check that you have write permissions to that directory.' % outFile, err)
            return None

#___________________________________________________________________________________________________ _writeRegistryEntry

    def _writeRegistryEntry(self, out, cacheOut, entry):
        # If there is an unconsumed registryEntry write it.
        if not entry:
            return None

        s = '\n' + entry + '\n'
        out.write(s)

        if cacheOut:
            cacheOut.write(s)
        return None

#___________________________________________________________________________________________________ _assembleFile

    def _assembleFile(self,
                      target,
                      importOverride=None,
                      replacements=None,
                      assembleData=None):

        #-------------------------------------------------------------------------------------------
        # CREATE FILE
        # Creates the file to write
        out = self._createOutputFile(target)
        if not out:
            self._log.write('ERROR: Unable to create output file')
            return

        #-------------------------------------------------------------------------------------------
        # DEFINE IMPORTS
        # Specify the files to import. For exec files the default packages are included, for
        # libraries these are overridden based on library target dependencies.
        targetImports = self._imports[
            target.package] if importOverride is None else importOverride

        replacements = replacements if isinstance(replacements, list) else []
        classList = []

        #-------------------------------------------------------------------------------------------
        # Note the last dependency so that the glue script can be appended before it
        lastDep = targetImports[-1]

        #-------------------------------------------------------------------------------------------
        # DEPENDENCY ASSEMBLY LOOP
        print('\n')
        for dep in targetImports:
            dep.open()

            if self._force or not dep.useCache:
                if not self._compileDependency(dep, out, replacements,
                                               targetImports, classList):
                    return None
                continue

            self._log.write('\tFROM CACHE: ' + dep.package)
            out.write(dep.cacheSource)
            dep.close()

        out.close()

        if self._verbose:
            print('\n')
            self._log.add('CONSTRUCTED: ' + out.name)

        return out.name

#___________________________________________________________________________________________________ _compileDependency

    def _compileDependency(self, dep, out, replacements, targetImports,
                           classList):
        classPattern = re.compile(CoffeescriptBuilder.CLASS_PATTERN)
        missingPattern = re.compile(CoffeescriptBuilder.MISSING_CLASS_PATTERN)

        #-------------------------------------------------------------------------------------------
        # MISSING DEPENDENCIES
        # Handle missing dependencies
        if not os.path.exists(dep.path):
            print("\n\n")
            self._log.write('ERROR: ' + dep.package +
                            ' package does not exist at: ' + dep.path)
            return False

        lastWhitespace = ''
        openParens = 0
        openBrackets = 0
        openBraces = 0
        skipNextLine = False
        methodName = ''
        className = ''
        registryEntry = None

        raw = dep.source
        dep.close()

        s = '\n\n\t#' + ('%' * 100) + '\n\t#' + (
            '%' * 100) + '\n#\t\t' + dep.package + '\n'

        out.write(s)
        if dep.allowCaching:
            cacheOut = open(dep.cachePath, 'w')
            cacheOut.write(s)
        else:
            try:
                if os.path.exists(dep.cachePath):
                    os.remove(dep.cachePath)
            except Exception as err:
                pass

            cacheOut = None

        self._log.write('\tCOMPILING: ' + dep.package)

        analyzer = CoffeescriptAnalyzer(raw, debug=self._debug)
        analyzer.analyze()

        #---------------------------------------------------------------------------------------
        # COMPILE
        # Line by line compile to ccs output file
        for l in analyzer:

            #-----------------------------------------------------------------------------------
            # RETARGET CLASS ACCESSORS TO VIZME REGISTRY
            # All class references (except references internal to the current dependency)
            # are retargeted to their VIZME registry entries to prevent class name conflicts.
            for rep in replacements + targetImports:
                if rep != dep:
                    offset = 0
                    res = rep.searchPattern.finditer(l.redacted)
                    for r in res:
                        start = r.start() + offset
                        end = r.end() + offset

                        if self._trace:
                            self._log.write('RETARGET: ' +
                                            l.source[start:end] + ' | ' +
                                            str(r.groupdict()))

                        # Make the replacement and adjust offsets for additional replacements
                        l.insert(start, end, rep.registryName)
                        offset += len(rep.registryName) - end + start

            #-----------------------------------------------------------------------------------
            # IDENTIFY CLASS DEFINITIONS
            # Find class definitions so they can be added to the VIZME registry.
            res = classPattern.search(l.redacted)
            if res:
                registryEntry = self._writeRegistryEntry(
                    out, cacheOut, registryEntry)
                className = res.group('class').strip()
                registryEntry = '\n%s.%s ?= %s' % (
                    CoffeescriptDependency.REGISTRY, className, className)
                classList.append(className)

            #-----------------------------------------------------------------------------------
            # CHECK FOR MISSING CLASSES
            # Search and find any missing class imports. If a possible missing import is found
            # flag it in the response.
            res = missingPattern.finditer(l.redacted)
            if res:
                for r in res:
                    cn = r.group('class').strip()
                    start = r.start()

                    if cn == className:
                        continue

                    # Ignore anything in all CAPS!
                    if cn.upper() == cn:
                        continue

                    # Ignore globally defined objects and classes
                    if cn in CoffeescriptBuilder._GLOBAL_CLASSES + analyzer.globalObjects:
                        continue

                    self._warnings.append({
                        'id': CoffeescriptBuilder._WARN_ID_MISSING_IMPORT,
                        'class': cn,
                        'line': l.lineNumber,
                        'package': dep.package
                    })

                    print('\n')
                    self._log.write(
                        'WARNING: Possible missing import\n\tmissing: %s\n\tfrom: %s [line #%s]'
                        % (cn, dep.package, str(l.lineNumber)))

            #-----------------------------------------------------------------------------------
            # LINE DEBUGGER ANALYSIS
            c = l.redacted.strip()
            skip = skipNextLine or not l.isSignificant
            skipNextLine = False

            if not skip:
                skips = [
                    'class', 'try', 'catch', 'else', 'when', '.', '+', '-',
                    '/', '=', '*', ',', 'and', 'or'
                ]
                for s in skips:
                    if c.startswith(s):
                        skip = True
                        break

            if not skip:
                skips = ['->', '=>']
                methodPattern = re.compile('^(?P<method>[^:]+)')

                for s in skips:
                    if c.endswith(s):
                        skip = True
                        res = methodPattern.search(c)
                        if res and res.group('method'):
                            methodName = res.group('method')
                        elif c.startswith('$'):
                            methodName = '$'

                        break

            # Check for line continuations
            if l.isSignificant:
                skips = ['.', '+', '-', '/', '=', '*', ',', 'and', 'or']
                for s in skips:
                    if c.endswith(s):
                        skipNextLine = True
                        break

            if self._trace:
                self._log.write(
                    c.replace('\n', '') +
                    ('\n\t@@@@ skip: ' + str(skip) + '\n\t@@@@ parens: ' +
                     str(openParens) + '\n\t@@@@ braces: ' + str(openBraces) +
                     '\n\t@@@@ brackets: ' + str(openBrackets) +
                     '\n\t@@@@ skipNext: ' + str(skipNextLine)))

            if self._debug and not skip and openParens == 0 and openBraces == 0 and openBrackets == 0:
                debugLine = 'window.___vmiDebug(\'%s\', \'%s\', \'%s\', %s)\n' % \
                            (dep.package, className, methodName, str(l.lineNumber))

                indent = len(l.indent) > len(lastWhitespace)
                dedent = len(l.indent) < len(lastWhitespace)

                skips = [')', ']', '}']
                skip = False
                for s in skips:
                    if c.startswith(s):
                        skip = True
                        break

                if not (dedent and skip):
                    lastWhitespace = l.indent

                codePattern = re.compile('(?P<code>[^\s\t\n]+)')
                res = codePattern.search(c)
                if not res or len(res.groupdict()['code']) == 0:
                    if self._trace:
                        self._log.write('EMPTY: "' + c + '"')
                    debugLine = ''

                l.insert(0, 0, l.indent + debugLine)

            if l.isSignificant:
                openParens += l.redacted.count('(') - l.redacted.count(')')
                openBrackets += l.redacted.count('[') - l.redacted.count(']')
                openBraces += l.redacted.count('{') - l.redacted.count('}')

            #---------------------------------------------------------------------------------------
            # WRITE MODIFIED OUTPUT
            out.write(l.source)

            if cacheOut:
                cacheOut.write(l.source)

        self._writeRegistryEntry(out, cacheOut, registryEntry)

        if cacheOut:
            cacheOut.close()

        return True

#___________________________________________________________________________________________________ _compileToJavascript

    def _compileToJavascript(self,
                             target,
                             assembledFile,
                             jsIncludeOverrides=None):

        # Use the Coffeescript compiler to create a JS compilation of the assembled CS file
        result = SystemUtils.executeCommand(
            ['coffee', '-c', '--bare', assembledFile])
        status = result['code']
        output = result['out']
        errors = 0
        forceVerbose = False

        #-------------------------------------------------------------------------------------------
        # ERROR HANDLING
        #    Check the error status of the compilation process and if a failure occurred parse the
        #    error results for display and logging.
        if status:
            outputLines = str(output).replace('\r', '').split('\n')
            for line in outputLines:
                if line.startswith('Error:') or line.startswith(
                        'SyntaxError:'):
                    errors += 1
                    result = CoffeescriptBuilder._parseError(line)
                    if result:
                        self._log.add(result)
                    else:
                        forceVerbose = True

        if forceVerbose:
            self._log.add(output)

        self._report[target.package] = errors
        if self._verbose:
            print("\n\n")
            if errors == 0 and status == 0:
                self._log.write('Compilation complete: ' + target.compiledPath)
            else:
                self._log.write('Compilation FAILED: ' + target.package)

        f = open(target.compiledPath, 'r')
        res = f.read()
        f.close()

#___________________________________________________________________________________________________ _parseIncludes

    def _parseIncludes(self, target, rootTarget=None):
        """Doc..."""
        if rootTarget is None:
            rootTarget = target

        if rootTarget.package not in self._imports:
            self._imports[rootTarget.package] = []

        if rootTarget.package not in self._requires:
            self._requires[rootTarget.package] = []

        if rootTarget.package not in self._includes:
            self._includes[rootTarget.package] = []

        if not os.path.exists(target.path):
            print("\n")
            self._log.add('WARNING: Missing import.\n\tPACKAGE: ' + target.package + '\n\tFILE: ' \
                          + target.path)
            print("\n")
            return

        f = open(target.path)
        for line in f:

            # import parse
            dependency = CoffeescriptDependency.createImport(
                line, self._rootPath)
            if dependency and not dependency.isInList(
                    self._imports[rootTarget.package]):
                self._parseIncludes(dependency, rootTarget)
                self._imports[rootTarget.package].append(dependency)
                continue

            # require parse
            dependency = CoffeescriptDependency.createRequire(
                line, self._rootPath)
            if dependency and not dependency.isInList(
                    self._imports[rootTarget.package]):
                self._requires[rootTarget.package].append(dependency)
                continue

            # include parse
            dependency = CoffeescriptDependency.createInclude(
                line, self._rootPath)
            if dependency and not dependency.isInList(
                    self._includes[rootTarget.package]):
                self._includes[rootTarget.package].append(dependency)
                continue

        f.close()
        self._imports[rootTarget.package].append(target)

#___________________________________________________________________________________________________ _processRequires

    def _processRequires(self, target):
        currentTarget = self._imports[target.package].pop()
        while len(self._requires[target.package]) > 0:
            self._parseIncludes(self._requires[target.package].pop(0), target)

        outlist = []
        for item in self._imports[target.package]:
            if not item.isInList(outlist) and not item.compare(currentTarget):
                outlist.append(item)
        self._imports[target.package] = outlist
        self._imports[target.package].append(currentTarget)

#___________________________________________________________________________________________________ _getLibraryData

    def _getLibraryData(self, target):
        targets = []
        modules = []
        imports = []
        includes = []

        src = open(target.path, 'r')
        for line in src:

            # target parse
            d = CoffeescriptDependency.create(line, self._rootPath)
            if not d:
                continue

            if d.dependencyType == CoffeescriptDependency.TARGET_TYPE:
                targets.append(d)
            elif d.dependencyType == CoffeescriptDependency.IMPORT_TYPE:
                imports.append(d)
            elif d.dependencyType == CoffeescriptDependency.REQUIRE_TYPE:
                imports.append(d)
            elif d.dependencyType == CoffeescriptDependency.INCLUDE_TYPE:
                includes.append(d)
            elif d.dependencyType == CoffeescriptDependency.MODULE_TYPE:
                modules.append(d)
            else:
                continue

            self._parseIncludes(d)

        src.close()

        return targets, imports, modules, includes

#___________________________________________________________________________________________________ _compileAllInDirectory

    @staticmethod
    def _compileAllInDirectory(path,
                               rootPath=None,
                               debug=False,
                               trace=False,
                               force=False,
                               compress=False):
        results = ''
        missing = {}
        count = 0
        for f in CoffeescriptBuilder.getScriptsInPath(path):
            target = CoffeescriptDependency(f, rootPath)
            if not (target.exists and (target.isExec or target.isLib)):
                continue

            c = CoffeescriptBuilder(target,
                                    rootPath,
                                    debug=debug,
                                    trace=trace,
                                    force=force,
                                    compress=compress)
            c.construct()
            count += 1
            for n, v in DictUtils.iter(c.report):
                num = max(0, 60 - len(n))
                results += '\n' + n + ':' + ('.' * num)
                if v == 0:
                    results += 'SUCCESS'
                elif v > 0:
                    results += 'COMPILATION FAILED'
                else:
                    results += 'ASSEMBLY FAILED'

            if len(c.warnings) > 0:
                results += '[' + str(len(c.warnings)) + ' WARNINGS]'
                for v in c.warnings:
                    if v['id'] != CoffeescriptBuilder._WARN_ID_MISSING_IMPORT:
                        continue

                    key = v['package'] + '-' + v['class'] + '-' + str(v['line'])
                    if key in missing:
                        continue

                    missing[key] = v

        if len(results) > 0:
            print('\nDIRECTORY ' + path + ' COMPILE RESULTS [' + str(count) +
                  ']:' + results)
        return {'res': results, 'missing': missing}

#___________________________________________________________________________________________________ _parseError

    @staticmethod
    def _parseError(error):
        """ Parses errors of the format:
        "Error: In /vizme2/website/js/vmi/blog/author/exec.ccs, Parse error on line 181: Unexpected 'INDENT'"
        """

        ccsFile = None
        sep     = None

        prefixReplacements = ['SyntaxError: In ', 'Error: In ']
        for p in prefixReplacements:
            error = error.replace(p, '')

        out = '\n-----------------------------------------------\nERROR: '
        try:
            sep = error.index(',')
            ccsFile = error[:sep]
        except Exception:
            pass

        try:
            sep2 = error.index(':')
            out += error[sep2 + 1:].strip() + '\n'
        except Exception:
            if error and sep is not None:
                out += error[sep + 1:].strip() + '\n'

        pattern = re.compile('line[\s\t]+(?P<linenumber>[0-9]+)')
        res = pattern.search(error)
        if res and len(res.groups()) > 0:
            lineNumber = int(res.groups()[0]) - 1
        else:
            out += '    Unspecified location'
            return

        if ccsFile:
            padSize = len(str(lineNumber + 3))
            jQueryName = 'Exec Function (JQUERY Document ready)'
            functionName = None
            className = None
            trace = ''
            f = open(ccsFile, 'r')
            for i, line in enumerate(f):
                if i > lineNumber + 4:
                    break

                if i <= lineNumber:
                    pattern = re.compile(
                        '^class[\s\t]+(?P<classname>[a-zA-Z0-9_]+)')
                    res = pattern.search(line)
                    if res and len(res.groups()) > 0:
                        className = res.groups()[0]
                        functionName = None

                    pattern = re.compile('^\$[\s\t]*[-=]+>')
                    res = pattern.search(line)
                    if res:
                        className = jQueryName
                        functionName = None

                    pattern = re.compile(
                        '[\s\t]*(?P<name>[a-zA-Z0-9_]+)[\s\t]*:[^-=>]*[-=]+>')
                    res = pattern.search(line)
                    if res and len(res.groups()) > 0:
                        functionName = res.groups()[0]

                if i > lineNumber - 4:
                    marker = ">>" if i == lineNumber else "  "
                    trace += marker + str(i).rjust(padSize) + '| ' + line

            f.close()

            if functionName:
                out += "  " + ("METHOD" if className else
                               "FUNCTION") + ": " + functionName + "\n"

            if className:
                out += "  " + ("CLASS" if className != jQueryName else
                               "EXEC") + ": " + className + "\n"

            out += "  TRACE:\n" + trace

        return out + "\n"
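For reference, a minimal sketch of how the builder above might be invoked from a build
script. The import path assumes the class lives alongside IncludeCompressor in the
pyaid.web.coffeescript package, and the directory paths are illustrative assumptions:

# Assumed module location; adjust to wherever CoffeescriptBuilder is actually defined.
from pyaid.web.coffeescript.CoffeescriptBuilder import CoffeescriptBuilder

# Recursively compile every exec/lib CoffeeScript target under a source tree and
# compress the compiled JavaScript output.
CoffeescriptBuilder.compileAllOnPath(
    '/path/to/project/js/src',
    rootPath='/path/to/project/js',
    recursive=True,
    compress=True)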
Ejemplo n.º 16
0
class CoffeescriptBuilder(object):
    """A class for..."""

    CLASS_PATTERN         = '^[\s\t]*class[\s\t]+(?P<class>[^\s\t\r\n]+)[\s\t]*'
    MISSING_CLASS_PATTERN = '[\s\t\(\[\{\!]+(?=[A-Z])(?P<class>[A-Za-z0-9_]+)(?P<next>[^A-Za-z0-9_]+)'

    _WARN_ID_MISSING_IMPORT = 'MISSING-IMPORT'

    _GLOBAL_CLASSES = [
        'SFLOW', 'PAGE', 'FB', 'Math', 'JSON', 'String', 'ActiveXObject', 'Date', 'DOMParser',
        'RegExp', 'Object', 'Number', 'Array', 'Function', 'XMLHttpRequest']

    _results = None
    _missing = None

#===================================================================================================
#                                                                                       C L A S S

#___________________________________________________________________________________________________ __init__
    def __init__(
            self, targetPackageOrPath, rootPath, verbose =True, debug =False, trace = False,
            force =False, compress =False, buildOnly =False
    ):
        """Creates a new instance of CoffeescriptBuilder."""

        self.buildOnly = buildOnly

        self._imports           = dict()
        self._requires          = dict()
        self._includes          = dict()
        self._report            = dict()
        self._warnings          = []
        self._dependencyReport  = dict()
        self._verbose  = verbose
        self._log      = Logger(self, printOut=True)
        self._trace    = trace
        self._debug    = debug
        self._targets  = []
        self._force    = force
        self._compress = compress
        self._rootPath = rootPath

        if not isinstance(targetPackageOrPath, CoffeescriptDependency):
            target = CoffeescriptDependency(targetPackageOrPath, rootPath, None)
        else:
            target = targetPackageOrPath

        if target.exists:
            self._targets.append(target)
        else:
            csFiles = CoffeescriptBuilder.getScriptsInPath(target.packagePath)

            # Look for exec matches first
            for f in csFiles:
                testTarget = CoffeescriptDependency(f, rootPath, None)
                if testTarget.isExec:
                    self._targets.append(testTarget)

            # Look for lib matches second. Lib matches are tested as a second pass because
            # constructing all exec files first potentially optimizes the import process for
            # the libraries.
            for f in csFiles:
                testTarget = CoffeescriptDependency(f, rootPath, None)
                if testTarget.isLib:
                    self._targets.append(testTarget)

        if len(self._targets) == 0:
            print '\n\n'
            self._log.write('No targets exist for: %s. Compilation aborted.' % targetPackageOrPath)
            print '\n'

#===================================================================================================
#                                                                                   G E T / S E T

#___________________________________________________________________________________________________ GS: report
    @property
    def report(self):
        return self._report

#___________________________________________________________________________________________________ GS: warnings
    @property
    def warnings(self):
        return self._warnings

#___________________________________________________________________________________________________ GS: imports
    @property
    def imports(self):
        return self._imports

#___________________________________________________________________________________________________ GS: requires
    @property
    def requires(self):
        return self._requires

#___________________________________________________________________________________________________ GS: includes
    @property
    def includes(self):
        return self._includes

#===================================================================================================
#                                                                                     P U B L I C

#___________________________________________________________________________________________________ construct
    def construct(self):
        """Doc..."""
        for t in self._targets:
            self._report[t.package] = -1
            if t.isLib:
                self._constructLibrary(t)
            else:
                self._constructTarget(t)

            if self._compress:
                print 'COMPRESSING:',t.package
                from pyaid.web.coffeescript.IncludeCompressor import IncludeCompressor
                ic = IncludeCompressor()
                if not ic.compressFile(t.compiledPath):
                    print 'COMPRESSION FAILURE:',t.compiledPath

        return self._targets

#___________________________________________________________________________________________________ compileAllOnPath
    @staticmethod
    def compileAllOnPath(path, rootPath =None, recursive =False, debug =False, trace =False,
                         force =False, compress=False):

        CoffeescriptBuilder._results = ''
        CoffeescriptBuilder._missing = {}
        if recursive:
            print 'RECURSIVE COMPILE AT: ' + path
            def walker(paths, dirName, names):
                out = CoffeescriptBuilder._compileAllInDirectory(
                    os.path.join(paths[0], dirName), paths[1], debug=debug, trace=trace,
                    force=force, compress=compress
                )
                CoffeescriptBuilder._results += out['res']
                for n,v in out['missing'].iteritems():
                    if n in CoffeescriptBuilder._missing:
                        continue
                    CoffeescriptBuilder._missing[n] = v

            os.path.walk(path, walker, [path, rootPath])
            print '\n\nCOMPILATION RESULTS:' + CoffeescriptBuilder._results

            if CoffeescriptBuilder._missing:
                print '\n\nMISSING IMPORTS:' + '\n\n'
                for n,v in CoffeescriptBuilder._missing.iteritems():
                    print v['class'] + ' [LINE: #' + str(v['line']) + ' | ' + v['package'] + ']'
        else:
            print 'COMPILING DIRECTORY: ' + path
            CoffeescriptBuilder._compileAllInDirectory(
                path, rootPath, debug=debug, trace=trace, force=force, compress=compress)

#___________________________________________________________________________________________________ getScriptsInPath
    @staticmethod
    def getScriptsInPath(path):
        files = []

        for f in os.listdir(path):
            if f.lower().endswith('.' + CoffeescriptDependency.EXTENSION):
                files.append(os.path.join(path, f))

        return files

#===================================================================================================
#                                                                               P R O T E C T E D

#___________________________________________________________________________________________________ _constructLibrary
    def _constructLibrary(self, target):
        try:
            if self._verbose:
                print "\n\n" + ('-'*100) + '\n'
                self._log.add(
                    'LIBRARY: %s\n\tsource: %s\n\troot: %s' % (
                        target.package, target.path, target.rootPath))

            #---------------------------------------------------------------------------------------
            # Compile all includes using library data
            targets, imports, modules, includes = self._getLibraryData(target)

            # Process requires for all of the targets
            for t in (targets + imports + modules):
                self._processRequires(t)

            #---------------------------------------------------------------------------------------
            # IMPORTS

            # Compile all excludes skipping any exec or lib files that are listed in the import
            # statements.
            importExcludes = []
            for t in targets:
                for imp in self._imports[t.package]:
                    if not (imp.isExec or imp.isLib or imp.isInList(importExcludes)):
                        importExcludes.append(imp)

            # Compile all imports needed for the library. Any excludes are added to the shared
            # library to be made accessible via the VIZME registry.
            libImports    = []
            sharedImports = []
            for t in (imports + modules):
                for imp in self.imports[t.package]:
                    if not imp.isInList(libImports):
                        if imp.isInList(importExcludes):
                            if not imp.isInList(sharedImports):
                                sharedImports.append(imp)
                        else:
                            libImports.append(imp)
            libImports.append(target)

            #---------------------------------------------------------------------------------------
            # INCLUDES

            # Compile all includes to exclude from the library because they already exist in a
            # target.
            includeExcludes = []
            for t in targets:
                for inc in self._includes[t.package]:
                    if not inc.isInList(includeExcludes):
                        includeExcludes.append(inc)

            # Compile all includes needed for the library.
            libIncludes    = []
            sharedIncludes = []

            # Add the top-level includes directly because they are not handled implicitly like
            # the import case
            for inc in includes:
                if inc.isInList(includeExcludes):
                    sharedIncludes.append(inc)
                else:
                    libIncludes.append(inc)

            for t in (imports + modules):
                for inc in self.includes[t.package]:
                    if not inc.isInList(libIncludes):
                        if inc.isInList(includeExcludes):
                            if not inc.isInList(sharedIncludes):
                                sharedIncludes.append(inc)
                        else:
                            libIncludes.append(inc)

            if self._verbose:
                print '\n'
                s = 'IMPORTING:'
                for imp in libImports:
                    s += '\n\t' + imp.package
                for inc in libIncludes:
                    s += '\n\tEXTERNAL: ' + inc.package
                self._log.add(s)

                print '\n'
                s = 'EXCLUDING:'
                for imp in sharedImports:
                    s += '\n\t' + imp.package
                for inc in sharedIncludes:
                    s += '\n\tEXTERNAL: ' + inc.package
                self._log.add(s)

            #---------------------------------------------------------------------------------------
            # Construct intermediate compilation file.
            assembledFile = self._assembleFile(
                target, libImports, sharedImports, {'modules':modules}
            )
            if assembledFile is None:
                self._log.write('ERROR: File assembly failed.')
                return

            #---------------------------------------------------------------------------------------
            # Compile to Javascript
            if not self.buildOnly:
                self._compileToJavascript(target, assembledFile, libIncludes)

            if self._verbose:
                print "\n" + ('-'*100) + '\n'

        except Exception, err:
            print "\n\n\n"
            self._log.writeError(
                'ERROR: Compilation failure for: %s\n\tsource: %s\n\troot: %s'
                % (target.package, target.path, target.rootPath), err)