Example #1
def run_component(self, core, progress_dialog):
    # x must be monotonically increasing, so sort the pairs on depth first
    xyvals = zip(*sorted([(sample['depth'].magnitude,
                           sample['Calibrated 14C Age'].magnitude)
                          for sample in core]))
    core.properties['Age/Depth Model'] = datastructures.PointlistInterpolation(
        *xyvals, run=core.run)
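
The zip(*sorted(...)) idiom here sorts the (depth, age) pairs on depth and then transposes them back into separate x and y sequences for PointlistInterpolation. A minimal standalone sketch of just that idiom, with invented numbers standing in for the core samples:

# hypothetical (depth, age) pairs standing in for the core samples
pairs = [(12.0, 1850.0), (4.0, 610.0), (8.0, 1190.0)]

# sort on depth, then transpose rows into parallel columns
xvals, yvals = zip(*sorted(pairs))

print(xvals)  # (4.0, 8.0, 12.0)  -- depths, now monotonically increasing
print(yvals)  # (610.0, 1190.0, 1850.0)  -- ages reordered to match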
Example #2
def run_component(self, core, progress_dialog):
    x = [sample['depth'] for sample in core]
    y = [sample['Calibrated 14C Age'] for sample in core]
    interp_func = scipy.interpolate.interp1d([float(i) for i in x],
                                             [float(i) for i in y],
                                             bounds_error=False, fill_value=0,
                                             kind='quadratic')
    new_x = np.arange(min(x), max(x), abs(max(x) - min(x)) / 100.0)
    xyvals = zip(*sorted([(i, interp_func(i)) for i in new_x]))
    core.properties['Age/Depth Model'] = datastructures.PointlistInterpolation(*xyvals)
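
A self-contained sketch of the same resampling approach on synthetic numbers (the unit handling and the datastructures call are left out). Note that fill_value=0 means depths queried outside the sampled range silently come back with an age of 0:

import numpy as np
import scipy.interpolate

x = [0.0, 10.0, 20.0, 30.0]          # depths (cm)
y = [500.0, 1200.0, 2100.0, 3300.0]  # calibrated ages (years)

interp_func = scipy.interpolate.interp1d(x, y, bounds_error=False,
                                         fill_value=0, kind='quadratic')

# resample onto ~100 evenly spaced depths, as the component does
new_x = np.arange(min(x), max(x), (max(x) - min(x)) / 100.0)
new_y = interp_func(new_x)

print(new_y[0], new_y[-1])   # interpolated ages at the range endpoints
print(interp_func(999.0))    # 0.0 -- out-of-range depths hit fill_value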
Example #3
def run_component(self, core, progress_dialog):
    x = [sample['depth'].item() for sample in core]
    y = [sample['Calibrated 14C Age'] for sample in core]
    slope, y_intcpt, r_value, p_value, std_err = scipy.stats.linregress(x, y)
    xyvals = zip(*sorted([(i, y_intcpt + slope * i) for i in x]))
    core.properties['Age/Depth Model'] = datastructures.PointlistInterpolation(
        *xyvals, run=core.run)
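
The same fit on synthetic numbers: scipy.stats.linregress returns the slope and intercept (plus fit statistics), and the model is just the fitted line evaluated at each measured depth:

import scipy.stats

x = [0.0, 10.0, 20.0, 30.0]          # depths
y = [480.0, 1250.0, 2050.0, 3320.0]  # ages, roughly linear in depth

slope, y_intcpt, r_value, p_value, std_err = scipy.stats.linregress(x, y)

# evaluate the fitted line at the measured depths
model = [(depth, y_intcpt + slope * depth) for depth in x]
print(slope, y_intcpt, r_value ** 2)  # slope, intercept, R^2 of the fit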
Example #4
def transform_dict_out(self, value):
    if value.get('_datatype', None) == 'baconinfo':
        return datastructures.BaconInfo(value['csv_data'])

    if value.get('_datatype', None) == 'pointlist':
        return datastructures.PointlistInterpolation(
            value['xpoints'], value['ypoints'])
    return None
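
For reference, a sketch of the dict shapes this dispatcher appears to expect; the _datatype tag selects which datastructure to rebuild, and anything unrecognized falls through to None. The field values here are invented, only the keys come from the snippet:

# a serialized point-list age/depth model
pointlist_dict = {
    '_datatype': 'pointlist',
    'xpoints': [0.0, 10.0, 20.0],        # depths
    'ypoints': [500.0, 1200.0, 2100.0],  # ages
}

# a serialized BACON uncertainty table
bacon_dict = {
    '_datatype': 'baconinfo',
    'csv_data': '...',  # CSV payload as stored
}

# transform_dict_out(pointlist_dict) -> PointlistInterpolation
# transform_dict_out(bacon_dict)     -> BaconInfo
# transform_dict_out({'foo': 1})     -> None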
Example #5
def run_component(self, core, progress_dialog):
    # x must be monotonically increasing, so sort the pairs on depth first
    xyvals = zip(*sorted([(sample['depth'].magnitude,
                           sample['Calibrated 14C Age'].magnitude)
                          for sample in core]))
    # fit a smoothing B-spline through the (depth, age) points
    tck, u = scipy.interpolate.splprep(xyvals, s=200000)
    x_i, y_i = scipy.interpolate.splev(np.linspace(0, 1, 100), tck)
    # pair the evaluated points back up and re-sort on depth
    xyvals = zip(*sorted(zip(x_i, y_i)))
    core.properties['Age/Depth Model'] = datastructures.PointlistInterpolation(
        *xyvals, run=core.run)
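
A runnable sketch of the smoothing-spline step on noisy synthetic points. splprep takes a list of coordinate arrays and fits a parametric B-spline; the smoothing factor s trades closeness to the data against smoothness (the s=200000 above is presumably tuned to the scale of 14C ages):

import numpy as np
import scipy.interpolate

# noisy synthetic (depth, age) data, already sorted on depth
depths = np.linspace(0, 100, 20)
ages = 30.0 * depths + np.random.normal(0, 50, 20)

# fit the smoothing spline; tck holds the knots/coefficients/degree
tck, u = scipy.interpolate.splprep([depths, ages], s=200000)

# evaluate 100 points along the parameterized curve
x_i, y_i = scipy.interpolate.splev(np.linspace(0, 1, 100), tck)

# pair up and re-sort on depth, as in the component above
xvals, yvals = zip(*sorted(zip(x_i, y_i)))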
Example #6
        def run_component(self, core, progress_dialog):
            '''Run BACON on the given core.

            core: the core data
            progress_dialog: a dialog box. Used to update progress on BACON.

            This calls the SWIG wrapper to BACON.
            It then updates
            core.properties['Bacon Model Uncertainty']
            and
            core.properties['Age/Depth Model']
            '''

            #build a guess for thickness similar to how Bacon's R code does it.
            #section thickness is the expected granularity of change within the
            #core. currently, we are using the default BACON parameter of 5 (cm);
            #note that we can use a very large thickness to get a fast ballpark
            #model, and that BACON suggests limiting the number of sections per
            #core to between 10 and 200.

            #the manual suggests that for "larger" (>~2-3 m) cores, a good
            #approach is to start with a very high thickness (say, 50) and
            #lower it until a "smooth enough" model is found.
            def scaledepth(key):
                return float(core[key]['depth'].rescale('cm').magnitude)

            thickguess = 5
            mindepth = scaledepth(min(core.keys()))
            maxdepth = scaledepth(max(core.keys()))
            sections = (maxdepth - mindepth) / thickguess
            if sections < 10:
                thickguess = min(self.prettynum(
                    (sections / 10.0) * thickguess))
            elif sections > 200:
                thickguess = max(
                    self.prettynum((sections / 200.0) * thickguess))

            parameters = self.user_inputs(
                core,
                [('Bacon Number of Iterations', ('integer', None, False), 200),
                 ('Bacon Section Thickness',
                  ('float', 'cm', False), thickguess),
                 ('Bacon Memory: Mean', ('float', None, False), 0.7),
                 ('Bacon Memory: Strength', ('float', None, False), 4),
                 ('Bacon Difference',
                  ('float', None, False), max(core.keys()) - min(core.keys())),
                 ('Bacon Sections', ('float', None, False), sections),
                 ('Bacon t_a', ('integer', None, False), 4, {
                     'helptip': 't_b = t_a + 1'
                 })])

            num_iterations = parameters['Bacon Number of Iterations']
            sections = int(
                numpy.ceil((maxdepth - mindepth) /
                           parameters['Bacon Section Thickness'].magnitude))

            # progress_dialog.Update(1, "Initializing BACON")
            #TODO: make sure to actually use the right units...
            data = self.build_data_array(core)
            memorya, memoryb = self.find_mem_params(core)
            hiatusi = self.build_hiatus_array(core, data)

            guesses = numpy.round(
                numpy.random.normal(data[0][1], data[0][2], 2))
            guesses.sort()

            self.set_value(core, 'Bacon guess 1', guesses[0])
            self.set_value(core, 'Bacon guess 2', guesses[1])

            #create a temporary file for BACON to write its output to, so as
            #to read it back in later.
            #for the curious, this is not in prepare() because we want a new
            #file for every run of BACON, not every instance of this component.
            #self.tempfile = tempfile.NamedTemporaryFile()
            self.tempfile = open('tempfile', 'w+')
            #the size given is the # of (I think) accepted iterations that we
            #want in our final output file. BACON defaults this to 2000, so that's
            #what I'm using for now. Note the ability to tweak this value to
            #do quick-and-dirty vs. "good" models

            #minage & maxage are meant to indicate limits of calibration curves;
            #just giving really big #s there is okay.
            # progress_dialog.Update(2, "Running BACON Simulation")

            # int run_simulation(int numdets, PreCalDet** dets, int hdim, int numhiatus,
            #            double* hdata,
            #            int sections, double memorya, double memoryb, double minyr, double maxyr,
            #            double firstguess, double secondguess, double mindepth, double maxdepth,
            #            char* outfile, int numsamples)
            time_change = 30
            computation_progress = PBI.PyBusyInfo("Please Be Patient. \n" +
                                                  "Time Remaining: " +
                                                  str(time_change),
                                                  title="Runnning Computation")
            cfiles.baconc.run_simulation(
                len(data),
                [cfiles.baconc.PreCalDet(*sample)
                 for sample in data], hiatusi, sections, memorya, memoryb,
                -1000, 1000000, guesses[0], guesses[1], mindepth, maxdepth,
                self.tempfile.name, num_iterations)
            # progress_dialog.Update(8, "Writing Data")
            #I should do something here to clip the undesired burn-in off the
            #front of the file (default ~200)

            #for now, doing a lazy hack where we just take the trivial mean of
            #each depth-point age and call that the "model age" at that depth.
            #there's lots of interesting statistics here; please consult a
            #real statistician.

            reader = csv.reader(self.tempfile,
                                dialect='excel-tab',
                                skipinitialspace=True)
            truethick = float(maxdepth - mindepth) / sections
            sums = [0] * (sections + 1)
            total_info = []

            depth = [mindepth + (truethick * i) for i in range(sections + 1)]

            total_info.append(depth)

            total = 0

            for it in reader:
                if not it:
                    continue
                path_ls = [0] * (sections + 1)
                total += 1
                #as read by csv, the bacon output file has an empty entry as its
                #first column, so we ignore that. 1st real column is a special case,
                #a set value instead of accumulation
                cumage = float(it[1])
                sums[0] += cumage
                #last 2 cols are not acc rates; they are "w" and "U"; currently
                #ignored, but related to this iteration's probability
                for ind, acc in enumerate(it[2:-2]):
                    cumage += truethick * float(acc)
                    sums[ind + 1] += cumage
                    path_ls[ind + 1] += cumage
                total_info.append(path_ls)
            sums = [s / total for s in sums]
            self.tempfile.close()

            core.properties[
                'Bacon Model Uncertainty'] = datastructures.BaconInfo(
                    total_info, core.partial_run.display_name)

            #TODO: are these depths fiddled with at all in the alg? should I make
            #sure to pass "pretty" ones?
            core.properties['Age/Depth Model'] = \
                datastructures.PointlistInterpolation(
                        [mindepth + truethick*ind for ind in range(len(sums))],
                        sums, core.partial_run.display_name)

            #output file as I understand it:
            #something with hiatuses I need to work out.
            #some number of rows of n columns. the last column is (?)

            #the 1st column appears to be the "correct" age of the youngest
            #point in the core
            #following columns up to the last 2 cols, which I am ignoring, are the
            #accepted *accumulation rate (years per cm)* for that segment of the core.
            # progress_dialog.Update(9)
            del computation_progress
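
A small standalone sketch of the post-processing loop above: each BACON output row starts with an absolute age for the top of the core, followed by per-section accumulation rates in years per cm, so cumulatively adding rate times section thickness walks one age/depth path down the core, and averaging the paths over all iterations gives the model ages. The two rows here are invented for illustration:

# two fake BACON rows: [top age, acc rates...] (the trailing w/U columns
# and the empty leading column are already stripped)
rows = [
    [500.0, 20.0, 25.0, 30.0],
    [520.0, 18.0, 27.0, 29.0],
]
truethick = 5.0   # cm per section
sections = 3

sums = [0.0] * (sections + 1)
for row in rows:
    cumage = row[0]          # first column is an absolute age, not a rate
    sums[0] += cumage
    for ind, acc in enumerate(row[1:]):
        cumage += truethick * acc   # (yr/cm) * cm = years accumulated
        sums[ind + 1] += cumage
sums = [s / len(rows) for s in sums]

print(sums)  # mean model age at each section boundary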