Example #1
    def set_linear_settings(self):
        
        # Set the current renderer to mental ray
        pm.setAttr('defaultRenderGlobals.currentRenderer', 'mentalRay', type='string')

        # Turn on Enable Color Management and update the other widgets accordingly
        pm.setAttr('defaultRenderGlobals.colorProfileEnabled', 1)
        pm.mel.updateCommonColorProfile()

        utils.set_attrs(pm, {
            # Switch Image Format to EXR and Image Compression to PIZ
            'defaultRenderGlobals.imageFormat': 51,
            'defaultRenderGlobals.imfPluginKey':'exr',
            'mentalrayGlobals.imageCompression': 3,
            # Switch Data Type to RGBA (Float) 4x32 Bit under Framebuffer in the Quality tab
            'miDefaultFramebuffer.datatype': 5,
            # Set the Default View Color Manager
            'defaultViewColorManager.imageColorProfile': 2,
        })
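
The integer values above are Maya enum indices. As a quick sanity check (a usage sketch only, assuming a running Maya session with PyMEL imported as pm), the attributes can be read back after calling the method:

    # Usage sketch (assumes a Maya session with 'import pymel.core as pm'):
    # read the attributes back to confirm the linear settings took effect.
    print pm.getAttr('defaultRenderGlobals.imageFormat')   # expect 51 (OpenEXR)
    print pm.getAttr('miDefaultFramebuffer.datatype')      # expect 5 (RGBA float 4x32 bit)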
Example #2
    def set_linear_settings(self):
        """
        Sets the global renderer settings for the VRay linear workflow
        """

        # Make sure the VRay plugin is loaded
        # and the current renderer is set to VRay
        pm.mel.loadPlugin('vrayformaya')
        pm.mel.vrayRegisterRenderer()
        pm.mel.vrayCreateVRaySettingsNode()
        current_renderer = pm.Attribute('defaultRenderGlobals.currentRenderer')
        current_renderer.set('vray')
        # Get the scene's VRay settings node
        vray_settings = pm.PyNode('vraySettings')

        utils.set_attrs(vray_settings, {
            # Image File Output
            'imageFormatStr': 'exr',
            # Color mapping
            'cmap_adaptationOnly': 1, # Don't affect colors, only adaptation
            # Add your attribute here...
        })
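
Both methods delegate to a utils.set_attrs helper that is not shown in these examples. A minimal sketch of what it could look like, with the behavior inferred from the two call sites (passing either the pm module with 'node.attr' keys, or a PyNode with plain attribute names), not taken from the original source:

    # Hypothetical sketch of utils.set_attrs (inferred, not the original):
    # both the pm module and a PyNode expose a setAttr() callable, so one
    # loop covers both call styles used above.
    def set_attrs(node, attrs):
        for name, value in attrs.items():
            node.setAttr(name, value)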
Example #3
def raw_to_h5(filenames, out = "out.h5", handlers = available_handlers,
              t0 = dp.parse('2004-01-01 00:00:00 +0000'), skip_on_assert = False,
              show_progress = True, ignore_errors = False, skip_unhandled = False):
    """
    converts ASCII data to HDF5 tables
        filenames : iterable, filenames of all data files (events, weather, etc.) in any order
              out : string, filename of outputfile (default='out.h5')
         handlers : iterable of LineHandlers for parsing the data files (default=available_handlers)
               t0 : datetime, reference time for 'time' column,
                    time is stored as 'time since t0' (default='2004-01-01 00:00:00 +0100')
    skip_on_assert: if True, skip lines that are invalid (if LineHandler.verify() raises AssertionError)
                    (default=False, exception is raised)
    """

    _filenames = []
    for f in filenames:
        if path.exists(f):
            if path.isdir(f):
                for dn, dns, fns in os.walk(f):
                    for fn in fns:
                        _filenames.append(path.join(dn, fn))
            else:
                _filenames.append(f)
    filenames = _filenames
    if verbose > 0:  # 'verbose' is assumed to be a module-level flag
        print 'files to process:'
        for fn in filenames:
            print '   ', fn

    assert len(filenames) > 0, 'no input files'

    if show_progress:
        pb = ProgressBar(maxval = len(filenames), widgets = [Bar(), ' ', Percentage(), ' ', ETA()], fd = sys.stdout)
        print "reference time t0 =", t0
        print 'autodetecting file types...'

    def read_files(files, row, handler):
        for f in files:
            for entry in fileiter(f, handler, skip_on_assert, show_progress, ignore_errors):
                try:
                    for k, v in zip(handler.col_names, entry):
                        if isinstance(v, dt.datetime):
                            row[k] = (v - t0).total_seconds()
                        else:
                            row[k] = v

                    row.append()

                except StopIteration:
                    pass

            if show_progress:
                pb.update(pb.currval + 1)

    # autodetect handlers and time sort files
    files_dict = detect_and_sort(filenames, skip_unhandled)

    if show_progress:
        print 'detected file types:', ['{} ({})'.format(k.table_name, len(v)) for k, v in files_dict.iteritems()]
        print "processing data... (%d files)" % (len(filenames),)
        pb.start()

    # create HDF5 file
    filters = t.Filters(complevel = 1, complib = 'zlib')
    with t.openFile(out, 'w', 'datafile created with raw_to_h5', filters = filters) as h5:
        h5.root._v_attrs.creationdate = dt.datetime.now(pytz.utc).isoformat()
        raw = h5.createGroup(h5.root, 'raw', 'raw data')

        # create and fill raw data tables
        for handler, files in files_dict.iteritems():
            handler = handler()  # instantiate the LineHandler
            title = handler.table_title
            if show_progress:
                print 'creating table: %s (%s)' % (handler.table_name, title)
            table = h5.createTable(raw, handler.table_name, handler.col_descriptor,
                                   title, expectedrows = 10000 * len(files))
            set_attrs(table, t0, handler.col_units)
            read_files(files, table.row, handler)
            table.flush()

    if show_progress:
        pb.finish()
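
raw_to_h5 depends on several helpers that are not shown here (available_handlers, fileiter, detect_and_sort, set_attrs). A minimal sketch of what one LineHandler could look like, with the interface inferred from the call sites; the table name, columns, and parse logic are illustrative assumptions only:

    from dateutil import parser as dp
    import tables as t

    # Hypothetical LineHandler sketch (interface inferred from raw_to_h5,
    # not taken from the original source): each handler describes one table
    # and knows how to parse and validate one line of its ASCII format.
    class ExampleWeatherHandler(object):
        table_name = 'weather'                # HDF5 table name
        table_title = 'weather readings'      # HDF5 table title
        col_names = ('time', 'temperature')   # column order of parsed entries
        col_units = ('s', 'degC')             # stored on the table via set_attrs()
        col_descriptor = {
            'time': t.Float64Col(pos=0),      # stored as seconds since t0
            'temperature': t.Float32Col(pos=1),
        }

        def parse(self, line):
            # e.g. "2004-06-01T12:00:00+0000 23.5"
            timestamp, temperature = line.split()
            return dp.parse(timestamp), float(temperature)

        def verify(self, entry):
            # skip_on_assert in raw_to_h5 reacts to AssertionErrors raised here
            assert -80.0 < entry[1] < 60.0, 'implausible temperature'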
Example #4
def merge(primary_file, secondary_file = None, outfile = None, primary_table = None,
          secondary_table = None, merge_on = 'time', max_inter = 4 * 3600, quiet = False):
    """
    Merges a primary table (events) with a secondary table (weather) on the
    'merge_on' column, linearly interpolating the secondary data to the
    primary table's times; intervals longer than max_inter seconds are
    regarded as invalid and skipped.
    """
    # open data file(s)
    if outfile is None:
        h5pri = h5out = t.openFile(primary_file, 'r+')
    else:
        h5pri = t.openFile(primary_file, 'r')
        h5out = t.openFile(outfile, 'w')

    if secondary_file is None or os.path.abspath(primary_file) == os.path.abspath(secondary_file):
        h5sec = h5pri
    else:
        h5sec = t.openFile(secondary_file, 'r')

    with h5pri, h5sec, h5out:

        # open event table and read t0
        pri_table = h5pri.getNode(primary_table)
        _printinfo(pri_table)
        t0e = dp.parse(pri_table.attrs.t0)
        eunits = list(json.loads(pri_table.attrs.units))
        assert pri_table.nrows > 1

        # open weather table and read t0
        sec_table = h5sec.getNode(secondary_table)
        _printinfo(sec_table)
        t0w = dp.parse(sec_table.attrs.t0)
        wunits = list(json.loads(sec_table.attrs.units))
        assert sec_table.nrows > 1

        # global t0 is the smaller one
        t0 = min(t0w, t0e)
        print "global reference time t0 =", t0

        # time offset for weather, to adjust for new global t0
        toffw = (t0w - t0).total_seconds()
        if toffw != 0:
            print "weather time offset =", toffw

        # time offset for events, to adjust for new global t0
        toffe = (t0e - t0).total_seconds()
        if toffe != 0:
            print "  event time offset =", toffe

        # get column index for 'time' in each table
        etime = pri_table.colnames.index(merge_on)
        wtime = sec_table.colnames.index(merge_on)

        # build column descriptors for table of merged data
        merged_descriptor = OrderedDict()  # keep the order
        for k in pri_table.colnames:
            merged_descriptor[k] = pri_table.coldescrs[k]

        # add cols of weather table
        weather_colnames = list(sec_table.colnames)  # work on copy
        weather_colnames.pop(wtime)  # remove 'time' column
        for k in weather_colnames:  # add remaining cols
            merged_descriptor[k] = sec_table.coldescrs[k].copy()

        # adjust position fields (column order in descriptors)
        for i, v in enumerate(merged_descriptor.values()):
            v._v_pos = i

        # merge unit description
        merged_units = eunits
        wunits.pop(wtime)
        merged_units.extend(wunits)

        # create group for merged data (reuse it if it already exists)
        try:
            merged = h5out.getNode('/merged')
        except t.NoSuchNodeError:
            merged = h5out.createGroup(h5out.root, 'merged', 'merged data')

        merged_table = h5out.createTable(merged, os.path.basename(primary_table), merged_descriptor,
                                         pri_table._v_title + ' merged with ' + sec_table._v_title,
                                         expectedrows = pri_table.nrows)
        set_attrs(merged_table, t0, tuple(merged_units))  # store new global t0 with this table
        row = merged_table.row

        _printinfo(merged_table)

        # get the first TWO weather rows, i.e. the first weather interval
        weather_iterator = sec_table.iterrows()
        weather_0 = weather_iterator.next()[:]
        tw0 = weather_0[wtime] + toffw
        weather_1 = weather_iterator.next()[:]
        tw1 = weather_1[wtime] + toffw

        # start console progress bar
        print "merging data..."
        if not quiet:
            pb = ProgressBar(maxval = pri_table.nrows,
                             widgets = [Bar(), ' ', Percentage(), ' ', ETA()], fd = sys.stdout)
            pb.start()

        # loop over events
        event_counter = 0
        for event in pri_table:
            if not quiet:
                pb.update(pb.currval + 1)  # update progress bar

            # skip to next event if weather too new
            te = event[etime] + toffe  # adjust for global t0, apply offset
            if te < tw0:  # if event time < start of weather interval...
                continue

            try:  # skip to next pair of weather data until the event is contained
                while not (tw0 <= te <= tw1):
                    weather_0 = weather_1  # shift 1 -> 0
                    tw0 = weather_0[wtime] + toffw  # adjust for global t0, apply offset
                    weather_1 = weather_iterator.next()[:]  # get next weather row as tuple (do [:])
                    tw1 = weather_1[wtime] + toffw  # adjust for global t0, apply offset

            except StopIteration:
                break  # exit event loop if there are no more weather rows

            # skip event if weather interval too long (weather regarded as invalid)
            if (tw1 - tw0) > max_inter:
                continue

            # interpolate weather data to event time
            winterp = _interpolate(te, weather_0, weather_1, wtime)
            winterp.pop(wtime)  # remove 'time' col from interpolated weather

            # merged data: event with weather data
            ew = list(event[:])  # copy event data
            ew[etime] = te  # update event time (because of offset)
            ew.extend(winterp)  # add interpolated weather data

            assert len(merged_descriptor) == len(ew)  # length must match

            # write newly merged data into row
            for k, v in zip(merged_descriptor.keys(), ew):
                row[k] = v

            # append row
            row.append()
            event_counter += 1  # count merged events

        if not quiet:
            pb.finish()  # finish progress bar

        merged_table.flush()  # force writing the table

        # output status information
        print "merged %d of %d events, skipped %d" % (event_counter, pri_table.nrows, pri_table.nrows - event_counter)
        print "first merged record", seconds2datetime(t0, merged_table[0][etime])
        print " last merged record", seconds2datetime(t0, merged_table[-1][etime])