Example no. 1
    def test_group_a_few_points(self):

        gen = GeoPointGen.generate_with_world_bounds(1000)

        points = []

        start = millis()

        for i in range(0, len(gen.points) - 1):

            haversine(
                (gen.points[i].latitude, gen.points[i].longitude),
                (gen.points[i + 1].latitude, gen.points[i + 1].longitude))

        print('haversine took ' + str(millis() - start))

        for geo_point in gen.points:
            points.append(TestPoint(geo_point.latitude, geo_point.longitude))

        start = millis()

        proximate_points_map = GeoDistancesGroupPoints.group_points(
            0, points, 1000)

        count = 0
        for value in proximate_points_map.values():
            if value is not None:
                count = count + len(value)

        diff = millis() - start

        print('Took ' + str(diff) + ' milliseconds for ' +
              str(len(proximate_points_map)) + '/' + str(count))
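Nearly every snippet on this page leans on a zero-argument millis() helper that returns the current time in milliseconds. Its definition is not shown anywhere in these examples; a minimal sketch of what it presumably looks like:

import time

def millis():
    # Assumed helper: current wall-clock time in whole milliseconds.
    # For measuring durations, time.monotonic() would be more robust,
    # since it never jumps backwards.
    return int(round(time.time() * 1000))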
Example no. 2
File: main.py Project: floor66/fsr
    def draw(self):
        # Draw when it's time to draw!
        if (millis() - self.draw_timer) >= self.REFRESH_MS.get():
            self.draw_timer = millis()

            # Remove annotations that are no longer in the current time window.
            # Iterate in reverse so deleting entries does not shift the indices
            # of the annotations we still have to visit.
            for i in reversed(range(len(self.annotations))):
                t, msg, ln, txt = self.annotations[i]

                if t <= self.data_plot.get_xlim()[0]:
                    ln.remove()
                    txt.remove()
                    del self.annotations[i]

            self.data_plot.set_title(
                "Sensor data\nRecording: %s\n" %
                timerunning(time.time() - self.__rec_start__))
            self.data_plot.xaxis.set_major_formatter(
                FuncFormatter(lambda x, pos: timerunning(x / 1000)))
            self.do_auto_scale()

            # Speeds up drawing tremendously
            self.data_plot.draw_artist(self.data_plot.patch)

            for i in range(0, self.NUM_ANALOG):
                if i in self.SHOW_PINS:
                    self.data_plot.draw_artist(self.plot_lines[i])

            self.fig.canvas.draw_idle()
            self.fig.canvas.flush_events()
Example no. 3
    def run(self):
        temperature_count = 0
        last_temp = 0
        pid = 0

        while True:

            if self.state == Oven.STATE_RUNNING:
                if self.simulate:
                    self.runtime += 0.5
                else:
                    runtime_delta = datetime.datetime.now() - self.start_time
                    self.runtime = runtime_delta.total_seconds()

                if millis() - self.profile.pidStart >= pid_cycle:
                    self.profile.pidStart = millis()
                    self.target = self.profile.update_pid(
                        self.temp_sensor.temperature)
                    self.pid.setpoint = self.target
                    pid = self.pid(self.temp_sensor.temperature)
                    log.info(
                        "update pid at %.1f deg F (Target: %.1f), PID %.1f, phase %s"
                        % (self.temp_sensor.temperature, self.target, pid,
                           "Hold" if self.profile.segPhase == 1 else "Ramp"))

                # Capture the last temperature value. This must be done before set_heat2, since there is a sleep
                last_temp = self.temp_sensor.temperature
                self.set_heat2(pid, self.profile.pidStart)
                # Update the schedule segment
                self.profile.update_seg(self.temp_sensor.temperature)

                if self.profile.finished():
                    self.reset()
Example no. 4
    def hover(e):
        global __start__
        
        if ((millis() - __start__) < 50):
            return
        else:
            __start__ = millis()
        
        c = False
        
        for line in nots:
            if line.get_linestyle() != "dashed":
                line.set_linestyle("dashed")
                line.set_linewidth(1)
                c = True

            if e.inaxes == ax:
                cont, ind = line.contains(e)

                if cont:
                    line.set_linestyle("solid")
                    line.set_linewidth(2)
                    c = True

        if c:
            fig.canvas.draw_idle()
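The hover handler above is a hand-rolled throttle: any event that arrives within 50 ms of the last one processed is dropped. The same pattern, pulled out into a small reusable class (a sketch, not code from this project):

import time

def millis():
    # Assumed helper, matching the zero-argument millis() used above
    return int(round(time.time() * 1000))

class Throttle:
    """Rate-limit an event handler: ready() is True at most once per interval."""

    def __init__(self, interval_ms):
        self.interval_ms = interval_ms
        self.last = 0

    def ready(self):
        now = millis()
        if now - self.last < self.interval_ms:
            return False
        self.last = now
        return True

hover_throttle = Throttle(50)
# Inside the matplotlib event handler:
#     if not hover_throttle.ready():
#         return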
Example no. 5
    def check_with_random_data(self, num_points, geo_bounds, max_diameter_km):

        gen = GeoPointGen.generate_with_bounds(num_points, geo_bounds)

        points = make_test_points(gen.points)

        results = {}

        for algorithm_name, implementation in self.geo_distances_algorithms.items():

            start = millis()

            all_distances = []

            implementation.make_distances_with_max(
                0, points, geo_bounds, max_diameter_km,
                lambda distance: all_distances.append(distance))

            points_set = set()
            all_distances.sort()

            results[algorithm_name] = _Result(points_set, all_distances)

            print('Executed ' + algorithm_name + ' in ' +
                  str(millis() - start) + ', got ' + str(len(all_distances)))
Example no. 6
    def run_profile(self, profile):
        log.info("Running profile %s" % profile.name)
        self.profile = profile
        self.profile.running = True
        # Ramp-hold init
        self.profile.rampStart = millis()
        self.profile.pidStart = millis()
        self.profile.segNum = 1

        self.state = Oven.STATE_RUNNING
        self.start_time = datetime.datetime.now()
        log.info("Starting")
Example no. 7
def build_custom_time(custom_time):
    now = datetime.datetime.utcnow()
    time_filters = {
        "yesterday": (
            millis(
                now.replace(hour=0, minute=0, second=0, microsecond=0) -
                datetime.timedelta(days=1, seconds=1)),
            millis(
                now.replace(hour=0, minute=0, second=0, microsecond=0) -
                datetime.timedelta(minutes=2, seconds=1)),
        )
    }
    return time_filters[custom_time]
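build_custom_time above, like the Earth Engine scripts further down (via utils.millis(export_dt)), calls millis() with a datetime argument, so that variant presumably converts a datetime to Unix epoch milliseconds. A sketch of the assumed signature:

import datetime

def millis(dt):
    # Assumed helper: datetime -> Unix epoch milliseconds
    epoch = datetime.datetime(1970, 1, 1, tzinfo=dt.tzinfo)
    return int((dt - epoch).total_seconds() * 1000)

print(millis(datetime.datetime(2020, 1, 1)))  # 1577836800000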
Example no. 8
def run():
    """Reads a frame from the video, tracks the pucks and
    returns the position data to the caller"""
    global writer, totalTimePassed, firstRun, showOutputSeperate

    if firstRun:
        firstRun = False
        openAndConfigureWindow()

    startTime = utils.millis()

    #Read next frame
    success, frame = capture.read()
    if not success: return False, 0, None, 0, None

    #Crop frame
    frame = frame[top:bottom, left:right]

    #Track objects
    displayFrame, positions = multi_tracker.trackObjects(frame)

    if DEBUG:
        frameMax = int(capture.get(cv.CAP_PROP_FRAME_COUNT))
        frameCount = int(capture.get(cv.CAP_PROP_POS_FRAMES))
        time = utils.millis() - startTime
        totalTimePassed += time
        secs = int((time * (frameMax - frameCount)) / 1000)
        countStr = "Frame: " + str(frameCount) + "/" + str(frameMax)
        ellapStr = "Ellapsed: " + utils.clockStringFromSecs(
            totalTimePassed / 1000)
        etaStr = "ETA: " + utils.clockStringFromSecs(secs)
        cv.putText(displayFrame, countStr, (0, 25), cv.FONT_HERSHEY_PLAIN, 2,
                   (0, 255, 0))
        cv.putText(displayFrame, ellapStr, (0, 50), cv.FONT_HERSHEY_PLAIN, 2,
                   (0, 255, 0))
        cv.putText(displayFrame, etaStr, (0, 75), cv.FONT_HERSHEY_PLAIN, 2,
                   (0, 255, 0))

    #Display frame and write it to the video file
    if showOutputSeperate:
        cv.imshow("Tracker", displayFrame)
        cv.waitKey(1)
    if writeVideo:
        writer.write(displayFrame)

    timeElapsed = utils.millis() - startTime

    #Returns status, frame counter, current puck positions, the processing time for this frame and the display frame
    return True, capture.get(
        cv.CAP_PROP_POS_FRAMES), positions, timeElapsed, displayFrame
Example no. 9
File: main.py Project: floor66/fsr
    def init_serial(self):
        self.can_start = False  # To wait for Arduino to give the go-ahead

        # Wait for serial connection
        timer = millis()
        while True:
            self.update_gui()

            if not self.recording:
                return False

            try:
                self.ser = serial.Serial(self.COM_PORT.get(),
                                         self.BAUD_RATE.get())
                break
            except serial.SerialException as e:
                if (millis() - timer) >= 1000:  # Give an error every second
                    self.status("Connect Arduino to USB!")
                    self.logger.log("Connect Arduino to USB!")
                    timer = millis()

        # Wait for the go-ahead from Arduino
        timer = millis()
        while True:
            self.update_gui()

            if not self.recording:
                return False

            try:
                data_in = self.ser.readline()
            except Exception as e:
                self.logger.log(e)
                data_in = b""  # keep data_in defined so the length check below is safe

            if len(data_in) > 0:
                try:
                    data_in = data_in.decode().rstrip()

                    if data_in == "INIT_COMPLETE":
                        self.can_start = True
                        return True
                except Exception as e:
                    self.logger.log(e)

            if (millis() - timer) >= (self.INIT_TIMEOUT * 1000):
                self.logger.log("Arduino failed to initialize after %i sec" %
                                self.INIT_TIMEOUT)
                return False
Example no. 10
    def update_pid(self, temp_sensor):
        # Get the last target temperature
        if self.segNum == 1:  # Use the thermocouple temperature for the first segment
            self.lastTemp = 75
        else:
            self.lastTemp = self.segTemps[self.segNum - 2]

        # Calculate the new set point value.  Don't set above / below target temp
        seg_idx = self.segNum - 1
        if self.segPhase == 0:
            ramp_hours = (millis() - self.rampStart) / 3600000.0
            calc_set_point = self.lastTemp + (
                self.segRamps[seg_idx] * ramp_hours)  # Ramp
            # Clamp so the set point never passes the segment target
            if self.segRamps[seg_idx] >= 0:
                calc_set_point = min(calc_set_point, self.segTemps[seg_idx])
            else:
                calc_set_point = max(calc_set_point, self.segTemps[seg_idx])
        else:
            calc_set_point = self.segTemps[seg_idx]  # Hold

        return calc_set_point
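Concretely, the ramp arithmetic above works out as follows: starting at 75 °F with a 100 °F/h ramp, the set point 30 minutes in is 125 °F, and it is clamped once it reaches the segment target. A worked sketch with illustrative numbers (units taken from the log messages in these snippets):

last_temp = 75.0                      # segment start temperature, deg F
ramp_rate = 100.0                     # segRamps entry, deg F per hour
target = 200.0                        # segTemps entry for this segment
elapsed_ms = 30 * 60 * 1000           # 30 minutes into the ramp

ramp_hours = elapsed_ms / 3600000.0            # 0.5 h
set_point = last_temp + ramp_rate * ramp_hours  # 75 + 50
set_point = min(set_point, target)              # clamp at the segment target
print(set_point)                                # 125.0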
Example no. 11
    def fade(self):
        if (self.lastUpdate + 90 < millis()):
            if self.brightness > 4:
                self.brightness -= 5
        elif self.brightness < 251:
            self.brightness += 5

        self.npx.setBrightness(self.brightness)
        self.npx.show()
Example no. 12
    def set_heat2(self, value, pidstart):
        if value * 1000 >= millis() - pidstart:
            self.heat = 1.0
            if gpio_available:
                log.info("Heat is ON")
                GPIO.output(config.gpio_heat, GPIO.HIGH)
        else:
            self.heat = 0.0
            if gpio_available:
                GPIO.output(config.gpio_heat, GPIO.LOW)
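set_heat2 is a time-proportioning output: the PID result value is read as seconds of heater-ON time per PID cycle, so the heater stays ON for the first value seconds after pidstart and OFF until the next cycle starts. A minimal sketch of that comparison (units assumed from the code above):

def heat_on(pid_output_s, pid_start_ms, now_ms):
    # Same test as set_heat2: ON while we are still inside the first
    # pid_output_s seconds of the current PID cycle.
    return pid_output_s * 1000 >= now_ms - pid_start_ms

# A 2.5 s output sampled over a 10 s cycle gives a 25% duty cycle:
for t_ms in range(0, 10000, 500):
    print(t_ms, heat_on(2.5, 0, t_ms))  # True while t_ms <= 2500, then False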
Example no. 13
    def update_seg(self, temp_sensor):
        # Start the hold phase
        if ((self.segPhase == 0 and self.segRamps[self.segNum - 1] < 0
             and temp_sensor <= (self.segTemps[self.segNum - 1] + temp_range))
                or (self.segPhase == 0 and self.segRamps[self.segNum - 1] >= 0
                    and temp_sensor >=
                    (self.segTemps[self.segNum - 1] - temp_range))):
            self.segPhase = 1
            self.holdStart = millis()

        # Go to the next segment
        if self.segPhase == 1 and (millis() - self.holdStart >=
                                   self.segHolds[self.segNum - 1] * 60000):
            self.segNum = self.segNum + 1
            self.segPhase = 0
            self.rampStart = millis()

        # Check if complete
        if self.segNum - 1 > self.numSegments:
            self.running = False
Example no. 14
    def __init__(self, npx, freq_start, freq_end):
        self.npx = npx
        self.index = 0
        self.offset = 40

        self.a = freq_start
        self.b = freq_end
        self.wide = freq_end - freq_start
        self.lastUpdate = millis()

        self.brightness = 255
Example no. 15
    def mine(self, difficulty, diff_bits=None):
        if diff_bits is not None:
            if diff_bits < difficulty:  # diff_bits must be greater than or equal to difficulty
                return 'diff_bits too low'
            elif diff_bits > 255:  # assumed upper bound so the 256-bit target stays positive
                return 'bad diff_bits'
            else:
                # set self.diff_bits to the specified diff_bits
                self.diff_bits = diff_bits
        else:
            # set the target diff bits to the minimum difficulty (difficulty var)
            self.diff_bits = difficulty

        # Make sure the block starts with a coinbase transaction
        if len(self.transactions) > 1:
            if self.transactions[0].type != 1:
                # Prepend the coinbase transaction, shifting the others down
                coinbase_txn = coinbase(MINING_ADDR, BLOCK_REWARD)
                self.transactions.insert(0, coinbase_txn)
        else:
            self.transactions.append(coinbase(MINING_ADDR, BLOCK_REWARD))

        work = self.as_bytes()
        target = diff2target(self.diff_bits)
        start = millis()
        begin = start
        hashes = 0
        for nonce in range(max_nonce):
            hashes += 1
            if millis() - start >= 60000:
                log.info("Mining at {} h/s".format(
                    math.ceil(hashes / ((millis() - start) / 1000))))
                start = millis()
                hashes = 0
            # increment the nonce
            self.nonce = nonce
            work = self.as_bytes()

            # hash the block
            hash_result = sha256(work)
            # print("hash={} nonce={} value={} target={}".format(hash_result, nonce, int(hash_result, 16), target))

            # check if this is a valid result, below the target
            if check_diff(self.diff_bits, hash_result):
                # set the hash of self to the hash we found
                self.hash = str(hash_result)
                if nonce > 0 and millis() - begin > 0:
                    log.info("Avg. hashrate={} h/s".format(
                        math.ceil(nonce / ((millis() - begin) / 1000))))
                return None
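mine() depends on diff2target and check_diff, which are not shown. A plausible standalone sketch, assuming diff_bits counts the required leading zero bits of the 256-bit SHA-256 value (both definitions here are guesses, not the project's code):

def diff2target(diff_bits):
    # Hypothetical: the target is the largest 256-bit value whose
    # diff_bits most significant bits are all zero.
    return 2 ** (256 - diff_bits)

def check_diff(diff_bits, hash_hex):
    # Hypothetical: a hash meets the difficulty if its integer value
    # falls below the target.
    return int(hash_hex, 16) < diff2target(diff_bits)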
Example no. 16
    def run(self):

        global reqQueue
        global serialport

        global evesGate

        while not self.currEvent.isSet():
            # get async response relevant to status
            resp = serialport.readline().decode()
            if (len(resp) > 0):
                print('state async message from controller - ' + resp)
                msg_type = resp[1:3]
                statecode = int(resp[4:6])
                newPayload = '{"state":{"desired":{"station_state":' + str(
                    statecode) + '}}}'
                evesGate.deviceShadowInstance.shadowUpdate(newPayload, None, 5)
                print('update message ' + newPayload)

            if (utils.millis() - self.lastQueueTime >= self.requestInterval):
                ### Add requests into queue
                reqQueue.put('$GS*BE\r')  # Get state
                reqQueue.put('$GG*B2\r')  # get charging current and voltage
                reqQueue.put('$GC*AE\r')  # get charging capacity
                self.lastQueueTime = utils.millis()

            if (not reqQueue.empty()):
                command = reqQueue.get()
                buff = command.encode()

                # Write Request Packet
                serialport.write(buff)
                resp = serialport.readline().decode()

                if (len(resp) > 0):
                    evesGate.timeout(False)
                    # check command type (skip the leading '$')
                    cmd_type = command[1:3]
                    if (cmd_type == 'GS'):
                        # Get state
                        currstate = int(resp[4:5])
                        evesGate.updateState(currstate)

                    elif (cmd_type == 'GG'):
                        # get charging current & voltage
                        firstspace = resp.find(' ')
                        secondspace = resp.find(' ', firstspace + 1)
                        if (firstspace != -1 and secondspace != -1):
                            current = int(resp[firstspace + 1:secondspace])
                            voltage = int(resp[secondspace + 1:])
                            evesGate.updateCurVolt(current, voltage)

                    elif (cmd_type == 'GC'):
                        # get charging capacity
                        firstspace = resp.find(' ')
                        secondspace = resp.find(' ', firstspace + 1)
                        if (firstspace != -1 and secondspace != -1):
                            cap_max = int(resp[firstspace + 1:secondspace])
                            cap_min = int(resp[secondspace + 1:])
                            evesGate.updateChargingCapacity(cap_max, cap_min)

                else:
                    evesGate.timeout(True)

                self.lastReqTime = utils.millis()
                '''
				buff = reqQueue.get().encode()
				serialport.write(buff)
				self.lastReqTime = utils.millis()
				'''

            resp = serialport.readline()
            length = len(resp)
            if (length > 0):  # Response Process
                print('Response from controller is received!')
                self.response(resp)
Example no. 17
def main(ini_path=None, overwrite_flag=False, delay_time=0, gee_key_file=None,
         max_ready=-1, cron_flag=False, reverse_flag=False):
    """Compute daily Tcorr images

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, overwrite existing files if the export dates are the same and
        generate new images (but with different export dates) even if the tile
        lists are the same.  The default is False.
    delay_time : float, optional
        Delay time in seconds between starting export tasks (or checking the
        number of queued tasks, see "max_ready" parameter).  The default is 0.
    gee_key_file : str, None, optional
        Earth Engine service account JSON key file (the default is None).
    max_ready : int, optional
        Maximum number of queued "READY" tasks.  The default is -1, which
        implies no limit to the number of tasks that will be submitted.
    cron_flag : bool, optional
        If True, only compute the Tcorr daily image if the existing image does
        not include all available images (using the 'wrs2_tiles' property),
        and limit the date range to the last 64 days (~2 months).
    reverse_flag : bool, optional
        If True, process dates in reverse order.
    """
    logging.info('\nCompute daily Tcorr images')

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    tmax_name = ini[model_name]['tmax_source']

    export_id_fmt = 'tcorr_image_{product}_{date}_{export}'
    asset_id_fmt = '{coll_id}/{date}_{export}'

    tcorr_daily_coll_id = '{}/{}_daily'.format(
        ini['EXPORT']['export_coll'], tmax_name.lower())

    if (tmax_name.upper() == 'CIMIS' and
            ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (tmax_name.upper() == 'DAYMET' and
            ini['INPUTS']['end_date'] > '2018-12-31'):
        logging.warning(
            '\nDAYMET is not currently available past 2018-12-31, '
            'using median Tmax values\n')
        # sys.exit()
    # elif (tmax_name.upper() == 'TOPOWX' and
    #         ini['INPUTS']['end_date'] > '2017-12-31'):
    #     logging.warning(
    #         '\nDAYMET is not currently available past 2017-12-31, '
    #         'using median Tmax values\n')
    #     # sys.exit()

    # Extract the model keyword arguments from the INI
    # Set the property name to lower case and try to cast values to numbers
    model_args = {
        k.lower(): float(v) if utils.is_number(v) else v
        for k, v in dict(ini[model_name]).items()}
    # et_reference_args = {
    #     k: model_args.pop(k)
    #     for k in [k for k in model_args.keys() if k.startswith('et_reference_')]}


    logging.info('\nInitializing Earth Engine')
    if gee_key_file:
        logging.info('  Using service account key file: {}'.format(gee_key_file))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('x', key_file=gee_key_file),
                      use_cloud_api=True)
    else:
        ee.Initialize(use_cloud_api=True)

    # Get a Tmax image to set the Tcorr values to
    logging.debug('\nTmax properties')
    tmax_source = tmax_name.split('_', 1)[0]
    tmax_version = tmax_name.split('_', 1)[1]
    if 'MEDIAN' in tmax_name.upper():
        tmax_coll_id = 'projects/earthengine-legacy/assets/' \
                       'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
        tmax_coll = ee.ImageCollection(tmax_coll_id)
        tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    else:
        # TODO: Add support for non-median tmax sources
        raise ValueError('unsupported tmax_source: {}'.format(tmax_name))
    logging.debug('  Collection: {}'.format(tmax_coll_id))
    logging.debug('  Source:  {}'.format(tmax_source))
    logging.debug('  Version: {}'.format(tmax_version))

    logging.debug('\nExport properties')
    export_info = utils.get_info(ee.Image(tmax_mask))
    if 'daymet' in tmax_name.lower():
        # Custom smaller extent for DAYMET focused on CONUS
        export_extent = [-1999750, -1890500, 2500250, 1109500]
        export_shape = [4500, 3000]
        export_geo = [1000, 0, -1999750, 0, -1000, 1109500]
        # Custom medium extent for DAYMET of CONUS, Mexico, and southern Canada
        # export_extent = [-2099750, -3090500, 2900250, 1909500]
        # export_shape = [5000, 5000]
        # export_geo = [1000, 0, -2099750, 0, -1000, 1909500]
        export_crs = export_info['bands'][0]['crs']
    else:
        export_crs = export_info['bands'][0]['crs']
        export_geo = export_info['bands'][0]['crs_transform']
        export_shape = export_info['bands'][0]['dimensions']
        # export_geo = ee.Image(tmax_mask).projection().getInfo()['transform']
        # export_crs = ee.Image(tmax_mask).projection().getInfo()['crs']
        # export_shape = ee.Image(tmax_mask).getInfo()['bands'][0]['dimensions']
        export_extent = [
            export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
            export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]]
    logging.debug('  CRS: {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo: {}'.format(export_geo))
    logging.debug('  Shape: {}'.format(export_shape))


    # This extent will limit the WRS2 tiles that are included
    # This is needed especially for non-median DAYMET Tmax since the default
    #   extent is huge but we are only processing a subset
    if 'daymet' in tmax_name.lower():
        export_geom = ee.Geometry.Rectangle(
            [-125, 25, -65, 53], proj='EPSG:4326', geodesic=False)
        # export_geom = ee.Geometry.Rectangle(
        #     [-135, 15, -55, 60], proj='EPSG:4326', geodesic=False)
    elif 'cimis' in tmax_name.lower():
        export_geom = ee.Geometry.Rectangle(
            [-124, 35, -119, 42], proj='EPSG:4326', geodesic=False)
    else:
        export_geom = tmax_mask.geometry()


    # If cell_size parameter is set in the INI,
    # adjust the output cellsize and recompute the transform and shape
    try:
        export_cs = float(ini['EXPORT']['cell_size'])
        export_shape = [
            int(math.ceil(abs((export_shape[0] * export_geo[0]) / export_cs))),
            int(math.ceil(abs((export_shape[1] * export_geo[4]) / export_cs)))]
        export_geo = [export_cs, 0.0, export_geo[2], 0.0, -export_cs, export_geo[5]]
        logging.debug('  Custom export cell size: {}'.format(export_cs))
        logging.debug('  Geo: {}'.format(export_geo))
        logging.debug('  Shape: {}'.format(export_shape))
    except KeyError:
        pass

    if not ee.data.getInfo(tcorr_daily_coll_id):
        logging.info('\nExport collection does not exist and will be built'
                     '\n  {}'.format(tcorr_daily_coll_id))
        input('Press ENTER to continue')
        ee.data.createAsset({'type': 'IMAGE_COLLECTION'}, tcorr_daily_coll_id)

    # Get current asset list
    logging.debug('\nGetting GEE asset list')
    asset_list = utils.get_ee_assets(tcorr_daily_coll_id)
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        pprint.pprint(asset_list[:10])

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('  Tasks: {}\n'.format(len(tasks)))
        input('ENTER')


    collections = [x.strip() for x in ini['INPUTS']['collections'].split(',')]

    # Limit by year and month
    try:
        month_list = sorted(list(utils.parse_int_set(ini['TCORR']['months'])))
    except KeyError:
        logging.info('\nTCORR "months" parameter not set in the INI,'
                     '\n  Defaulting to all months (1-12)\n')
        month_list = list(range(1, 13))
    try:
        year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
    except KeyError:
        logging.info('\nTCORR "years" parameter not set in the INI,'
                     '\n  Defaulting to all available years\n')
        year_list = []

    # Key is cycle day, value is a reference date on that cycle
    # Data from: https://landsat.usgs.gov/landsat_acq
    # Only 8 cycle days are needed because the Landsat 5/7 and 7/8 cycles are offset
    cycle_dates = {
        7: '1970-01-01',
        8: '1970-01-02',
        1: '1970-01-03',
        2: '1970-01-04',
        3: '1970-01-05',
        4: '1970-01-06',
        5: '1970-01-07',
        6: '1970-01-08',
    }
    # cycle_dates = {
    #     1:  '2000-01-06',
    #     2:  '2000-01-07',
    #     3:  '2000-01-08',
    #     4:  '2000-01-09',
    #     5:  '2000-01-10',
    #     6:  '2000-01-11',
    #     7:  '2000-01-12',
    #     8:  '2000-01-13',
    #     # 9:  '2000-01-14',
    #     # 10: '2000-01-15',
    #     # 11: '2000-01-16',
    #     # 12: '2000-01-01',
    #     # 13: '2000-01-02',
    #     # 14: '2000-01-03',
    #     # 15: '2000-01-04',
    #     # 16: '2000-01-05',
    # }
    cycle_base_dt = datetime.datetime.strptime(cycle_dates[1], '%Y-%m-%d')

    if cron_flag:
        # CGM - This seems like a silly way of getting the date as a datetime
        #   Why am I doing this and not using the commented out line?
        iter_end_dt = datetime.date.today().strftime('%Y-%m-%d')
        iter_end_dt = datetime.datetime.strptime(iter_end_dt, '%Y-%m-%d')
        iter_end_dt = iter_end_dt + datetime.timedelta(days=-4)
        # iter_end_dt = datetime.datetime.today() + datetime.timedelta(days=-1)
        iter_start_dt = iter_end_dt + datetime.timedelta(days=-64)
    else:
        iter_start_dt = datetime.datetime.strptime(
            ini['INPUTS']['start_date'], '%Y-%m-%d')
        iter_end_dt = datetime.datetime.strptime(
            ini['INPUTS']['end_date'], '%Y-%m-%d')
    logging.debug('Start Date: {}'.format(iter_start_dt.strftime('%Y-%m-%d')))
    logging.debug('End Date:   {}\n'.format(iter_end_dt.strftime('%Y-%m-%d')))


    for export_dt in sorted(utils.date_range(iter_start_dt, iter_end_dt),
                            reverse=reverse_flag):
        export_date = export_dt.strftime('%Y-%m-%d')
        next_date = (export_dt + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
        if month_list and export_dt.month not in month_list:
            logging.debug(f'Date: {export_date} - month not in INI - skipping')
            continue
        elif year_list and export_dt.year not in year_list:
            logging.debug(f'Date: {export_date} - year not in INI - skipping')
            continue
        elif export_date >= datetime.datetime.today().strftime('%Y-%m-%d'):
            logging.debug(f'Date: {export_date} - unsupported date - skipping')
            continue
        elif export_date < '1984-03-23':
            logging.debug(f'Date: {export_date} - no Landsat 5+ images before '
                          '1984-03-23 - skipping')
            continue
        logging.info(f'Date: {export_date}')

        export_id = export_id_fmt.format(
            product=tmax_name.lower(),
            date=export_dt.strftime('%Y%m%d'),
            export=datetime.datetime.today().strftime('%Y%m%d'))
        logging.debug('  Export ID: {}'.format(export_id))

        asset_id = asset_id_fmt.format(
            coll_id=tcorr_daily_coll_id,
            date=export_dt.strftime('%Y%m%d'),
            export=datetime.datetime.today().strftime('%Y%m%d'))
        logging.debug('  Asset ID: {}'.format(asset_id))

        if overwrite_flag:
            if export_id in tasks.keys():
                logging.debug('  Task already submitted, cancelling')
                ee.data.cancelTask(tasks[export_id]['id'])
            # This is intentionally not an "elif" so that a task can be
            # cancelled and an existing image/file/asset can be removed
            if asset_id in asset_list:
                logging.debug('  Asset already exists, removing')
                ee.data.deleteAsset(asset_id)
        else:
            if export_id in tasks.keys():
                logging.debug('  Task already submitted, exiting')
                continue
            elif asset_id in asset_list:
                logging.debug('  Asset already exists, skipping')
                continue

        # Build and merge the Landsat collections
        model_obj = ssebop.Collection(
            collections=collections,
            start_date=export_dt.strftime('%Y-%m-%d'),
            end_date=(export_dt + datetime.timedelta(days=1)).strftime(
                '%Y-%m-%d'),
            cloud_cover_max=float(ini['INPUTS']['cloud_cover']),
            geometry=export_geom,
            model_args=model_args,
            # filter_args=filter_args,
        )
        landsat_coll = model_obj.overpass(variables=['ndvi'])
        # wrs2_tiles_all = model_obj.get_image_ids()
        # pprint.pprint(landsat_coll.aggregate_array('system:id').getInfo())
        # input('ENTER')

        logging.debug('  Getting available WRS2 tile list')
        landsat_id_list = utils.get_info(landsat_coll.aggregate_array('system:id'))
        if not landsat_id_list:
            logging.info('  No available images - skipping')
            continue
        wrs2_tiles_all = set([id.split('_')[-2] for id in landsat_id_list])
        # print(wrs2_tiles_all)
        # print('\n')

        def tile_set_2_str(tiles):
            """Trying to build a more compact version of the WRS2 tile list"""
            tile_dict = defaultdict(list)
            for tile in tiles:
                tile_dict[int(tile[:3])].append(int(tile[3:]))
            tile_dict = {k: sorted(v) for k, v in tile_dict.items()}
            tile_str = json.dumps(tile_dict, sort_keys=True) \
                .replace('"', '').replace(' ', '')\
                .replace('{', '').replace('}', '')
            return tile_str
        wrs2_tiles_all_str = tile_set_2_str(wrs2_tiles_all)
        # pprint.pprint(wrs2_tiles_all_str)
        # print('\n')

        def tile_str_2_set(tile_str):
            # tile_dict = eval(tile_str)

            tile_set = set()
            for t in tile_str.replace('[', '').split('],'):
                path = int(t.split(':')[0])
                for row in t.split(':')[1].replace(']', '').split(','):
                    tile_set.add('{:03d}{:03d}'.format(path, int(row)))
            return tile_set
        # wrs2_tiles_all_dict = tile_str_2_set(wrs2_tiles_all_str)
        # pprint.pprint(wrs2_tiles_all_dict)


        # If overwriting, start a new export no matter what
        # The default is to not overwrite, so this mode will not be used often
        if not overwrite_flag:
            # Check if there are any previous images for this date
            # If so, only build a new Tcorr image if there are new wrs2_tiles
            #   that were not used in the previous image.
            # Should this code only be run in cron mode or is this the expected
            #   operation when (re)running for any date range?
            # Should we only test the last image
            # or all previous images for the date?
            logging.debug('  Checking for previous exports/versions of daily image')
            tcorr_daily_coll = ee.ImageCollection(tcorr_daily_coll_id)\
                .filterDate(export_date, next_date)\
                .limit(1, 'date_ingested', False)
            tcorr_daily_info = utils.get_info(tcorr_daily_coll)
            # pprint.pprint(tcorr_daily_info)
            # input('ENTER')

            if tcorr_daily_info['features']:
                # Assume we won't be building a new image and only set flag
                #   to True if the WRS2 tile lists are different
                export_flag = False

                # The ".limit(1, ..." on the tcorr_daily_coll above makes this
                # for loop and break statement unnecessary, but leaving for now
                for tcorr_img in tcorr_daily_info['features']:
                    # If the full WRS2 list is not present, rebuild the image
                    # This should only happen for much older Tcorr images
                    if 'wrs2_available' not in tcorr_img['properties'].keys():
                        logging.debug(
                            '    "wrs2_available" property not present in '
                            'previous export')
                        export_flag = True
                        break

                    # DEADBEEF - The wrs2_available property is now a string
                    # wrs2_tiles_old = set(tcorr_img['properties']['wrs2_available'].split(','))

                    # Convert available dict str to a list of path/rows
                    wrs2_tiles_old_str = tcorr_img['properties']['wrs2_available']
                    wrs2_tiles_old = tile_str_2_set(wrs2_tiles_old_str)

                    if wrs2_tiles_all != wrs2_tiles_old:
                        logging.debug('  Tile Lists')
                        logging.debug('  Previous: {}'.format(', '.join(
                            sorted(wrs2_tiles_old))))
                        logging.debug('  Available: {}'.format(', '.join(
                            sorted(wrs2_tiles_all))))
                        logging.debug('  New: {}'.format(', '.join(
                            sorted(wrs2_tiles_all.difference(wrs2_tiles_old)))))
                        logging.debug('  Dropped: {}'.format(', '.join(
                            sorted(wrs2_tiles_old.difference(wrs2_tiles_all)))))

                        export_flag = True
                        break

                if not export_flag:
                    logging.debug('  No new WRS2 tiles/images - skipping')
                    continue
                # else:
                #     logging.debug('    Building new version')
            else:
                logging.debug('    No previous exports')

        def tcorr_img_func(image):
            t_obj = ssebop.Image.from_landsat_c1_toa(
                ee.Image(image), **model_args)
            t_stats = ee.Dictionary(t_obj.tcorr_stats) \
                .combine({'tcorr_p5': 0, 'tcorr_count': 0}, overwrite=False)
            tcorr = ee.Number(t_stats.get('tcorr_p5'))
            count = ee.Number(t_stats.get('tcorr_count'))

            # Remove the merged collection indices from the system:index
            scene_id = ee.List(
                ee.String(image.get('system:index')).split('_')).slice(-3)
            scene_id = ee.String(scene_id.get(0)).cat('_') \
                .cat(ee.String(scene_id.get(1))).cat('_') \
                .cat(ee.String(scene_id.get(2)))

            return tmax_mask.add(tcorr) \
                .rename(['tcorr']) \
                .clip(image.geometry()) \
                .set({
                    'system:time_start': image.get('system:time_start'),
                    'scene_id': scene_id,
                    'wrs2_path': ee.Number.parse(scene_id.slice(5, 8)),
                    'wrs2_row': ee.Number.parse(scene_id.slice(8, 11)),
                    'wrs2_tile': scene_id.slice(5, 11),
                    'spacecraft_id': image.get('SPACECRAFT_ID'),
                    'tcorr': tcorr,
                    'count': count,
                })
        # Test for one image
        # pprint.pprint(tcorr_img_func(ee.Image(landsat_coll \
        #     .filterMetadata('WRS_PATH', 'equals', 36) \
        #     .filterMetadata('WRS_ROW', 'equals', 33).first())).getInfo())
        # input('ENTER')

        # (Re)build the Landsat collection from the image IDs
        landsat_coll = ee.ImageCollection(landsat_id_list)
        tcorr_img_coll = ee.ImageCollection(landsat_coll.map(tcorr_img_func)) \
            .filterMetadata('count', 'not_less_than',
                            float(ini['TCORR']['min_pixel_count']))

        # If there are no Tcorr values, return an empty image
        tcorr_img = ee.Algorithms.If(
            tcorr_img_coll.size().gt(0),
            tcorr_img_coll.median(),
            tmax_mask.updateMask(0))


        # Build the tile list as a string of a dictionary of paths and rows
        def tile_dict(path):
            # Get the row list for each path
            rows = tcorr_img_coll\
                .filterMetadata('wrs2_path', 'equals', path)\
                .aggregate_array('wrs2_row')
            # Convert rows to integers (otherwise they come back as floats)
            rows = ee.List(rows).sort().map(lambda row: ee.Number(row).int())
            return ee.Number(path).format('%d').cat(':[')\
                .cat(ee.List(rows).join(',')).cat(']')

        path_list = ee.List(tcorr_img_coll.aggregate_array('wrs2_path'))\
            .distinct().sort()
        wrs2_tile_str = ee.List(path_list.map(tile_dict)).join(',')
        # pprint.pprint(wrs2_tile_str.getInfo())
        # input('ENTER')

        # # DEADBEEF - This works but is really slow because of the getInfo
        # logging.debug('  Getting Tcorr collection tile list')
        # wrs2_tile_list = utils.get_info(
        #     tcorr_img_coll.aggregate_array('wrs2_tile'))
        # wrs2_tile_str = tile_set_2_str(wrs2_tile_list)
        # pprint.pprint(wrs2_tile_list)
        # pprint.pprint(wrs2_tile_str)
        # input('ENTER')

        # DEADBEEF - Old approach, tile lists for big areas are too long
        # def unique_properties(coll, property):
        #     return ee.String(ee.List(ee.Dictionary(
        #         coll.aggregate_histogram(property)).keys()).join(','))
        # wrs2_tile_list = ee.String('').cat(unique_properties(
        #     tcorr_img_coll, 'wrs2_tile'))
        # wrs2_tile_list = set([id.split('_')[-2] for id in wrs2_tile_list])


        def unique_properties(coll, property):
            return ee.String(ee.List(ee.Dictionary(
                coll.aggregate_histogram(property)).keys()).join(','))
        landsat_list = ee.String('').cat(unique_properties(
            tcorr_img_coll, 'spacecraft_id'))


        # Cast to float and set properties
        tcorr_img = ee.Image(tcorr_img).rename(['tcorr']).double() \
            .set({
                'system:time_start': utils.millis(export_dt),
                'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
                'date': export_dt.strftime('%Y-%m-%d'),
                'year': int(export_dt.year),
                'month': int(export_dt.month),
                'day': int(export_dt.day),
                'doy': int(export_dt.strftime('%j')),
                'cycle_day': ((export_dt - cycle_base_dt).days % 8) + 1,
                'landsat': landsat_list,
                'model_name': model_name,
                'model_version': ssebop.__version__,
                'tmax_source': tmax_source.upper(),
                'tmax_version': tmax_version.upper(),
                'wrs2_tiles': wrs2_tile_str,
                'wrs2_available': wrs2_tiles_all_str,
            })
        # pprint.pprint(tcorr_img.getInfo()['properties'])
        # input('ENTER')

        logging.debug('  Building export task')
        task = ee.batch.Export.image.toAsset(
            image=ee.Image(tcorr_img),
            description=export_id,
            assetId=asset_id,
            crs=export_crs,
            crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
            dimensions='{0}x{1}'.format(*export_shape),
        )

        logging.info('  Starting export task')
        utils.ee_task_start(task)

        # Pause before starting the next export task
        utils.delay_task(delay_time, max_ready)
        logging.debug('')
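The WRS2 tile-string helpers in the export script above are easiest to follow as a round trip. A standalone sketch of the same two functions with a concrete example (extracted here for illustration; in the script they are nested inside main):

import json
from collections import defaultdict

def tile_set_2_str(tiles):
    # '043033'-style tiles -> compact 'path:[rows]' string
    tile_dict = defaultdict(list)
    for tile in tiles:
        tile_dict[int(tile[:3])].append(int(tile[3:]))
    tile_dict = {k: sorted(v) for k, v in tile_dict.items()}
    return json.dumps(tile_dict, sort_keys=True) \
        .replace('"', '').replace(' ', '') \
        .replace('{', '').replace('}', '')

def tile_str_2_set(tile_str):
    # Inverse: compact string back to a set of zero-padded tile IDs
    tile_set = set()
    for t in tile_str.replace('[', '').split('],'):
        path = int(t.split(':')[0])
        for row in t.split(':')[1].replace(']', '').split(','):
            tile_set.add('{:03d}{:03d}'.format(path, int(row)))
    return tile_set

tiles = {'043033', '043034', '044033'}
s = tile_set_2_str(tiles)        # '43:[33,34],44:[33]'
assert tile_str_2_set(s) == tiles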
Example no. 18
show_bands = False
stats.append((0, len(trials) // 2))
stats.append((len(trials) // 2, len(trials)))
##
FREQ = 10 # Measuring frequency
SHOW_EVERY = 1 # Show only every x measurements
GAP_THRESHOLD = 2000 # delete gaps greater than x msec (likely artefacts, see Figures/Data_artefacts)
MAVG_WIND = 1000 # Msec window for moving average (50 * 20 = 1 sec)
##

raws = []

fig = None
art = []
for fn, wire, baseline, sensor in trials:
    __start__ = millis()
    wires.append(wire)

    f = open("sensordata/%s.txt" % fn.replace("annotations", "data"))
    data_lines = f.readlines()

    try:
        f = open("sensordata/%s.txt" % fn.replace("data", "annotations"))
        annot_lines = f.readlines()
    except FileNotFoundError:
        annot_lines = []

    # Get data + convert to val/volt/resist
    ts = []
    vals = []
    volts = []
Example no. 19
File: main.py Project: floor66/fsr
    def record(self):
        if not self.can_start:
            return False

        self.draw_timer = millis()
        while self.recording:
            self.update_gui()

            try:
                data_in = self.ser.readline()
            except serial.serialutil.SerialException as e:
                self.logger.log("Reading from the serial port failed: %s" % e)
                data_in = b""  # keep data_in defined for the length check below
            finally:
                if not self.recording:
                    return

            # Check the received data
            if len(data_in) > 1:
                data_in = data_in.decode()
                unpack = data_in.rstrip().split(",")

                if len(unpack) == 3:  # We expect 3 variables. No more, no less
                    try:
                        timestamp = int(unpack[0])
                        pin = int(unpack[1])
                        res_val = int(unpack[2])
                    except ValueError:
                        self.logger.log("Faulty serial communication: %s" %
                                        ",".join(unpack))
                        continue

                    if pin in self.REC_PINS:
                        self.curr_rec_count += 1
                        self.save_data(data_in)  # Save the data to file

                    # Display readout in the proper label
                    self.sensor_readouts[pin].config(
                        text="Pin A%i: %i mV / %.02f N" %
                        (pin, self.calc.val_to_volt(res_val) * 1000,
                         self.calc.val_to_N(res_val)))

                    if pin not in self.SHOW_PINS:  # Skip the pins we don't want/need to read
                        continue

                    self.times[pin].append(timestamp)
                    self.resistor_data_raw[pin].append(res_val)

                    # Here we can interject and do calculations based on which y-axis unit we want to see
                    opt = self.y_unit_opts.index(self.y_unit.get())

                    if opt == self.OPT_RAW:
                        self.resistor_data[pin].append(res_val)
                    elif opt == self.OPT_VOLTAGE:
                        a = self.calc.val_to_volt(res_val) * 1000
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_RESISTANCE:
                        a = self.calc.volt_to_Rfsr(
                            self.calc.val_to_volt(res_val))
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_CONDUCTANCE:
                        a = 10**6 / self.calc.volt_to_Rfsr(
                            self.calc.val_to_volt(
                                res_val)) if res_val > 0 else 0
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_VOLTAGE_AVG:
                        a = sum([
                            self.calc.val_to_volt(v) * 1000
                            for v in self.resistor_data_raw[pin]
                        ]) / len(self.resistor_data_raw[pin]) if len(
                            self.resistor_data_raw[pin]) > 0 else 0
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_RESISTANCE_AVG:
                        a = sum([self.calc.volt_to_Rfsr(self.calc.val_to_volt(v)) for v in self.resistor_data_raw[pin]]) / len(self.resistor_data_raw[pin]) \
                            if len(self.resistor_data_raw[pin]) > 0 else 0
                        self.resistor_data[pin].append(a)
                    elif opt == self.OPT_CONDUCTANCE_AVG:
                        a = sum([10**6 / self.calc.volt_to_Rfsr(self.calc.val_to_volt(v)) if v > 0 else 0 for v in self.resistor_data_raw[pin]]) / len(self.resistor_data_raw[pin]) \
                            if len(self.resistor_data_raw[pin]) > 0 else 0
                        self.resistor_data[pin].append(a)

                    self.plot_lines[pin].set_data(self.times[pin],
                                                  self.resistor_data[pin])

                    if len(self.times[pin]) > self.POP_CUTOFF.get():
                        cutoff = self.POP_CUTOFF.get()
                        self.times[pin] = self.times[pin][-cutoff:]
                        self.resistor_data_raw[pin] = self.resistor_data_raw[pin][-cutoff:]
                        self.resistor_data[pin] = self.resistor_data[pin][-cutoff:]

            self.draw()
Example no. 20
def main(ini_path=None,
         overwrite_flag=False,
         delay=0,
         key=None,
         cron_flag=False,
         reverse_flag=False):
    """Compute daily dT images

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, generate new images (but with different export dates) even if
        the dates already have images.  If False, only generate images for
        dates that are missing. The default is False.
    delay : float, optional
        Delay time between each export task (the default is 0).
    key : str, optional
        File path to an Earth Engine json key file (the default is None).
    reverse_flag : bool, optional
        If True, process dates in reverse order.

    """
    logging.info('\nCompute daily dT images')

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    if ini[model_name]['dt_source'].upper() == 'CIMIS':
        daily_coll_id = 'projects/climate-engine/cimis/daily'
    elif ini[model_name]['dt_source'].upper() == 'DAYMET':
        daily_coll_id = 'NASA/ORNL/DAYMET_V3'
    elif ini[model_name]['dt_source'].upper() == 'GRIDMET':
        daily_coll_id = 'IDAHO_EPSCOR/GRIDMET'
    else:
        raise ValueError('dt_source must be CIMIS, DAYMET, or GRIDMET')

    # Check dates
    if (ini[model_name]['dt_source'].upper() == 'CIMIS'
            and ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (ini[model_name]['dt_source'].upper() == 'DAYMET'
          and ini['INPUTS']['end_date'] > '2017-12-31'):
        logging.warning('\nDAYMET is not currently available past 2017-12-31, '
                        'using median Tmax values\n')
        # sys.exit()
    # elif (ini[model_name]['tmax_source'].upper() == 'TOPOWX' and
    #         ini['INPUTS']['end_date'] > '2017-12-31'):
    #     logging.warning(
    #         '\nDAYMET is not currently available past 2017-12-31, '
    #         'using median Tmax values\n')
    #     # sys.exit()

    logging.info('\nInitializing Earth Engine')
    if key:
        logging.info('  Using service account key file: {}'.format(key))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('deadbeef', key_file=key))
    else:
        ee.Initialize()

    # Output dT daily image collection
    dt_daily_coll_id = '{}/{}_daily'.format(
        ini['EXPORT']['export_coll'], ini[model_name]['dt_source'].lower())

    # Get an input image to set the dT values to
    logging.debug('\nInput properties')
    dt_name = ini[model_name]['dt_source']
    dt_source = dt_name.split('_', 1)[0]
    # dt_version = dt_name.split('_', 1)[1]
    daily_coll = ee.ImageCollection(daily_coll_id)
    dt_img = ee.Image(daily_coll.first()).select([0])
    dt_mask = dt_img.multiply(0)
    logging.debug('  Collection: {}'.format(daily_coll_id))
    logging.debug('  Source: {}'.format(dt_source))
    # logging.debug('  Version: {}'.format(dt_version))

    logging.debug('\nExport properties')
    export_proj = dt_img.projection().getInfo()
    export_geo = export_proj['transform']
    if 'crs' in export_proj.keys():
        export_crs = export_proj['crs']
    elif 'wkt' in export_proj.keys():
        export_crs = re.sub(r',\s+', ',', export_proj['wkt'])
    export_shape = dt_img.getInfo()['bands'][0]['dimensions']
    export_extent = [
        export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
        export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]
    ]
    logging.debug('  CRS:    {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo:    {}'.format(export_geo))
    logging.debug('  Shape:  {}'.format(export_shape))

    # Get current asset list
    if ini['EXPORT']['export_dest'].upper() == 'ASSET':
        logging.debug('\nGetting asset list')
        # DEADBEEF - daily is hardcoded in the asset_id for now
        asset_list = utils.get_ee_assets(dt_daily_coll_id)
    else:
        raise ValueError('invalid export destination: {}'.format(
            ini['EXPORT']['export_dest']))

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('  Tasks: {}\n'.format(len(tasks)))
        input('ENTER')

    # Limit by year and month
    try:
        month_list = sorted(list(utils.parse_int_set(ini['INPUTS']['months'])))
    except KeyError:
        logging.info('\nINPUTS "months" parameter not set in the INI,'
                     '\n  Defaulting to all months (1-12)\n')
        month_list = list(range(1, 13))
    # try:
    #     year_list = sorted(list(utils.parse_int_set(ini['INPUTS']['years'])))
    # except:
    #     logging.info('\nINPUTS "years" parameter not set in the INI,'
    #                  '\n  Defaulting to all available years\n')
    #     year_list = []

    # Group asset IDs by image date
    asset_id_dict = defaultdict(list)
    for asset_id in asset_list:
        asset_dt = datetime.datetime.strptime(
            asset_id.split('/')[-1].split('_')[0], '%Y%m%d')
        asset_id_dict[asset_dt.strftime('%Y-%m-%d')].append(asset_id)
    # pprint.pprint(export_dt_dict)

    iter_start_dt = datetime.datetime.strptime(ini['INPUTS']['start_date'],
                                               '%Y-%m-%d')
    iter_end_dt = datetime.datetime.strptime(ini['INPUTS']['end_date'],
                                             '%Y-%m-%d')
    logging.debug('Start Date: {}'.format(iter_start_dt.strftime('%Y-%m-%d')))
    logging.debug('End Date:   {}\n'.format(iter_end_dt.strftime('%Y-%m-%d')))

    for export_dt in sorted(utils.date_range(iter_start_dt, iter_end_dt),
                            reverse=reverse_flag):
        export_date = export_dt.strftime('%Y-%m-%d')

        # if ((month_list and export_dt.month not in month_list) or
        #         (year_list and export_dt.year not in year_list)):
        if month_list and export_dt.month not in month_list:
            logging.debug(f'Date: {export_date} - month not in INI - skipping')
            continue
        elif export_date >= datetime.datetime.today().strftime('%Y-%m-%d'):
            logging.debug(f'Date: {export_date} - unsupported date - skipping')
            continue
        logging.info(f'Date: {export_date}')

        export_id = ini['EXPORT']['export_id_fmt'] \
            .format(
                product=dt_name.lower(),
                date=export_dt.strftime('%Y%m%d'),
                export=datetime.datetime.today().strftime('%Y%m%d'),
                dest=ini['EXPORT']['export_dest'].lower())
        logging.debug('  Export ID: {}'.format(export_id))

        if ini['EXPORT']['export_dest'] == 'ASSET':
            asset_id = '{}/{}_{}'.format(
                dt_daily_coll_id, export_dt.strftime('%Y%m%d'),
                datetime.datetime.today().strftime('%Y%m%d'))
            logging.debug('  Asset ID: {}'.format(asset_id))

        if overwrite_flag:
            if export_id in tasks.keys():
                logging.debug('  Task already submitted, cancelling')
                ee.data.cancelTask(tasks[export_id])
            # This is intentionally not an "elif" so that a task can be
            # cancelled and an existing image/file/asset can be removed
            if (ini['EXPORT']['export_dest'].upper() == 'ASSET'
                    and asset_id in asset_list):
                logging.debug('  Asset already exists, removing')
                ee.data.deleteAsset(asset_id)
        else:
            if export_id in tasks.keys():
                logging.debug('  Task already submitted, exiting')
                continue
            elif (ini['EXPORT']['export_dest'].upper() == 'ASSET'
                  and asset_id in asset_list):
                logging.debug(
                    '  Asset with current export date already exists, '
                    'skipping')
                continue
            elif len(asset_id_dict[export_date]) > 0:
                logging.debug(
                    '  Asset with earlier export date already exists, '
                    'skipping')
                continue

        # Compute dT using a fake Landsat image
        # The system:time_start property is the only needed value
        model_obj = ssebop.Image(
            ee.Image.constant([0, 0]).rename(['ndvi', 'lst']).set({
                'system:time_start':
                utils.millis(export_dt),
                'system:index':
                'LC08_043033_20170716',
                'system:id':
                'LC08_043033_20170716'
            }),
            dt_source=dt_source.upper(),
            elev_source='SRTM',
            dt_min=ini['SSEBOP']['dt_min'],
            dt_max=ini['SSEBOP']['dt_max'],
        )

        # Cast to float and set properties
        dt_img = model_obj.dt.float() \
            .set({
                'system:time_start': utils.millis(export_dt),
                'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
                'date': export_dt.strftime('%Y-%m-%d'),
                'year': int(export_dt.year),
                'month': int(export_dt.month),
                'day': int(export_dt.day),
                'doy': int(export_dt.strftime('%j')),
                'model_name': model_name,
                'model_version': ssebop.__version__,
                'dt_source': dt_source.upper(),
                # 'dt_version': dt_version.upper(),
            })

        # Build export tasks
        if ini['EXPORT']['export_dest'] == 'ASSET':
            logging.debug('  Building export task')
            task = ee.batch.Export.image.toAsset(
                image=ee.Image(dt_img),
                description=export_id,
                assetId=asset_id,
                crs=export_crs,
                crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
                dimensions='{0}x{1}'.format(*export_shape),
            )
            logging.info('  Starting export task')
            utils.ee_task_start(task)

        # Pause before starting next task
        utils.delay_task(delay)
        logging.debug('')
Esempio n. 21
0
def main(ini_path=None,
         overwrite_flag=False,
         delay=0,
         key=None,
         cron_flag=False,
         reverse_flag=False):
    """Compute daily Tcorr images

    Parameters
    ----------
    ini_path : str
        Input file path.
    overwrite_flag : bool, optional
        If True, overwrite existing files if the export dates are the same and
        generate new images (but with different export dates) even if the tile
        lists are the same.  The default is False.
    delay : float, optional
        Delay time between each export task (the default is 0).
    key : str, optional
        File path to an Earth Engine json key file (the default is None).
    cron_flag : bool, optional
        If True, only compute the Tcorr daily image if the existing image does
        not include all available images (using the 'wrs2_tiles' property), and
        limit the date range to the last 64 days (~2 months).
    reverse_flag : bool, optional
        If True, process dates in reverse order.
    """
    logging.info('\nCompute daily Tcorr images')

    ini = utils.read_ini(ini_path)

    model_name = 'SSEBOP'
    # model_name = ini['INPUTS']['et_model'].upper()

    if (ini[model_name]['tmax_source'].upper() == 'CIMIS'
            and ini['INPUTS']['end_date'] < '2003-10-01'):
        logging.error(
            '\nCIMIS is not currently available before 2003-10-01, exiting\n')
        sys.exit()
    elif (ini[model_name]['tmax_source'].upper() == 'DAYMET'
          and ini['INPUTS']['end_date'] > '2017-12-31'):
        logging.warning('\nDAYMET is not currently available past 2017-12-31, '
                        'using median Tmax values\n')
        # sys.exit()
    # elif (ini[model_name]['tmax_source'].upper() == 'TOPOWX' and
    #         ini['INPUTS']['end_date'] > '2017-12-31'):
    #     logging.warning(
    #         '\nDAYMET is not currently available past 2017-12-31, '
    #         'using median Tmax values\n')
    #     # sys.exit()

    logging.info('\nInitializing Earth Engine')
    if key:
        logging.info('  Using service account key file: {}'.format(key))
        # The "EE_ACCOUNT" parameter is not used if the key file is valid
        ee.Initialize(ee.ServiceAccountCredentials('deadbeef', key_file=key))
    else:
        ee.Initialize()

    # Output Tcorr daily image collection
    tcorr_daily_coll_id = '{}/{}_daily'.format(
        ini['EXPORT']['export_coll'], ini[model_name]['tmax_source'].lower())

    # Get a Tmax image to set the Tcorr values to
    logging.debug('\nTmax properties')
    tmax_name = ini[model_name]['tmax_source']
    tmax_source = tmax_name.split('_', 1)[0]
    tmax_version = tmax_name.split('_', 1)[1]
    tmax_coll_id = 'projects/usgs-ssebop/tmax/{}'.format(tmax_name.lower())
    tmax_coll = ee.ImageCollection(tmax_coll_id)
    tmax_mask = ee.Image(tmax_coll.first()).select([0]).multiply(0)
    logging.debug('  Collection: {}'.format(tmax_coll_id))
    logging.debug('  Source: {}'.format(tmax_source))
    logging.debug('  Version: {}'.format(tmax_version))

    logging.debug('\nExport properties')
    export_geo = ee.Image(tmax_mask).projection().getInfo()['transform']
    export_crs = ee.Image(tmax_mask).projection().getInfo()['crs']
    export_shape = ee.Image(tmax_mask).getInfo()['bands'][0]['dimensions']
    export_extent = [
        export_geo[2], export_geo[5] + export_shape[1] * export_geo[4],
        export_geo[2] + export_shape[0] * export_geo[0], export_geo[5]
    ]
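    # export_geo is an affine transform [xScale, xShear, xMin, yShear, yScale,
    # yMax] with a negative yScale for north-up grids, so the extent above is
    # [xMin, yMin, xMax, yMax] where yMin = yMax + rows * yScale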
    logging.debug('  CRS: {}'.format(export_crs))
    logging.debug('  Extent: {}'.format(export_extent))
    logging.debug('  Geo: {}'.format(export_geo))
    logging.debug('  Shape: {}'.format(export_shape))

    # # Limit export to a user defined study area or geometry?
    # export_geom = ee.Geometry.Rectangle(
    #     [-125, 24, -65, 50], proj='EPSG:4326', geodesic=False)  # CONUS
    # export_geom = ee.Geometry.Rectangle(
    #     [-124, 35, -119, 42], proj='EPSG:4326', geodesic=False)  # California

    # If cell_size parameter is set in the INI,
    # adjust the output cellsize and recompute the transform and shape
    try:
        export_cs = float(ini['EXPORT']['cell_size'])
        export_shape = [
            int(math.ceil(abs((export_shape[0] * export_geo[0]) / export_cs))),
            int(math.ceil(abs((export_shape[1] * export_geo[4]) / export_cs)))
        ]
        export_geo = [
            export_cs, 0.0, export_geo[2], 0.0, -export_cs, export_geo[5]
        ]
        logging.debug('  Custom export cell size: {}'.format(export_cs))
        logging.debug('  Geo: {}'.format(export_geo))
        logging.debug('  Shape: {}'.format(export_shape))
    except KeyError:
        pass
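    # For example (hypothetical numbers, not from the source): a native
    # 4000 m grid of 100 x 80 cells with cell_size=1000 becomes a
    # 400 x 320 grid with the same origin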

    # Get current asset list
    if ini['EXPORT']['export_dest'].upper() == 'ASSET':
        logging.debug('\nGetting asset list')
        # DEADBEEF - daily is hardcoded in the asset_id for now
        asset_list = utils.get_ee_assets(tcorr_daily_coll_id)
    else:
        raise ValueError('invalid export destination: {}'.format(
            ini['EXPORT']['export_dest']))

    # Get current running tasks
    tasks = utils.get_ee_tasks()
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug('  Tasks: {}\n'.format(len(tasks)))
        input('ENTER')

    collections = [x.strip() for x in ini['INPUTS']['collections'].split(',')]

    # Limit by year and month
    try:
        month_list = sorted(list(utils.parse_int_set(ini['TCORR']['months'])))
    except (KeyError, ValueError):
        logging.info('\nTCORR "months" parameter not set in the INI,'
                     '\n  Defaulting to all months (1-12)\n')
        month_list = list(range(1, 13))
    try:
        year_list = sorted(list(utils.parse_int_set(ini['TCORR']['years'])))
    except (KeyError, ValueError):
        logging.info('\nTCORR "years" parameter not set in the INI,'
                     '\n  Defaulting to all available years\n')
        year_list = []

    # Key is cycle day, value is a reference date on that cycle
    # Data from: https://landsat.usgs.gov/landsat_acq
    # Only 8 cycle days are needed since the Landsat 5/7 and 7/8 orbits are offset
    cycle_dates = {
        7: '1970-01-01',
        8: '1970-01-02',
        1: '1970-01-03',
        2: '1970-01-04',
        3: '1970-01-05',
        4: '1970-01-06',
        5: '1970-01-07',
        6: '1970-01-08',
    }
    # cycle_dates = {
    #     1:  '2000-01-06',
    #     2:  '2000-01-07',
    #     3:  '2000-01-08',
    #     4:  '2000-01-09',
    #     5:  '2000-01-10',
    #     6:  '2000-01-11',
    #     7:  '2000-01-12',
    #     8:  '2000-01-13',
    #     # 9:  '2000-01-14',
    #     # 10: '2000-01-15',
    #     # 11: '2000-01-16',
    #     # 12: '2000-01-01',
    #     # 13: '2000-01-02',
    #     # 14: '2000-01-03',
    #     # 15: '2000-01-04',
    #     # 16: '2000-01-05',
    # }
    cycle_base_dt = datetime.datetime.strptime(cycle_dates[1], '%Y-%m-%d')
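    # Cycle day sketch for the 'cycle_day' property set below (example values,
    # not from the source):
    #   ((datetime.datetime(1970, 1, 10) - cycle_base_dt).days % 8) + 1 == 8,
    # which is consistent with 1970-01-02 (8 days earlier) mapping to
    # cycle day 8 in cycle_dates above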

    if cron_flag:
        # CGM - This seems like a silly way of getting the date as a datetime
        #   Why am I doing this and not using the commented out line?
        iter_end_dt = datetime.date.today().strftime('%Y-%m-%d')
        iter_end_dt = datetime.datetime.strptime(iter_end_dt, '%Y-%m-%d')
        iter_end_dt = iter_end_dt + datetime.timedelta(days=-4)
        # iter_end_dt = datetime.datetime.today() + datetime.timedelta(days=-1)
        iter_start_dt = iter_end_dt + datetime.timedelta(days=-64)
    else:
        iter_start_dt = datetime.datetime.strptime(ini['INPUTS']['start_date'],
                                                   '%Y-%m-%d')
        iter_end_dt = datetime.datetime.strptime(ini['INPUTS']['end_date'],
                                                 '%Y-%m-%d')
    logging.debug('Start Date: {}'.format(iter_start_dt.strftime('%Y-%m-%d')))
    logging.debug('End Date:   {}\n'.format(iter_end_dt.strftime('%Y-%m-%d')))

    for export_dt in sorted(utils.date_range(iter_start_dt, iter_end_dt),
                            reverse=reverse_flag):
        export_date = export_dt.strftime('%Y-%m-%d')
        next_date = (export_dt +
                     datetime.timedelta(days=1)).strftime('%Y-%m-%d')
        # if ((month_list and export_dt.month not in month_list) or
        #         (year_list and export_dt.year not in year_list)):
        if month_list and export_dt.month not in month_list:
            logging.debug(f'Date: {export_date} - month not in INI - skipping')
            continue
        elif export_date >= datetime.datetime.today().strftime('%Y-%m-%d'):
            logging.debug(f'Date: {export_date} - today or future date - skipping')
            continue
        elif export_date < '1984-03-23':
            logging.debug(f'Date: {export_date} - no Landsat 5+ images before '
                          '1984-03-23 - skipping')
            continue
        logging.info(f'Date: {export_date}')

        export_id = ini['EXPORT']['export_id_fmt'] \
            .format(
                product=tmax_name.lower(),
                date=export_dt.strftime('%Y%m%d'),
                export=datetime.datetime.today().strftime('%Y%m%d'),
                dest=ini['EXPORT']['export_dest'].lower())
        logging.debug('  Export ID: {}'.format(export_id))

        if ini['EXPORT']['export_dest'] == 'ASSET':
            asset_id = '{}/{}_{}'.format(
                tcorr_daily_coll_id, export_dt.strftime('%Y%m%d'),
                datetime.datetime.today().strftime('%Y%m%d'))
            logging.debug('  Asset ID: {}'.format(asset_id))

        if overwrite_flag:
            if export_id in tasks:
                logging.debug('  Task already submitted, cancelling')
                ee.data.cancelTask(tasks[export_id])
            # This is intentionally not an "elif" so that a task can be
            # cancelled and an existing image/file/asset can be removed
            if (ini['EXPORT']['export_dest'].upper() == 'ASSET'
                    and asset_id in asset_list):
                logging.debug('  Asset already exists, removing')
                ee.data.deleteAsset(asset_id)
        else:
            if export_id in tasks:
                logging.debug('  Task already submitted, skipping')
                continue
            elif (ini['EXPORT']['export_dest'].upper() == 'ASSET'
                  and asset_id in asset_list):
                logging.debug('  Asset already exists, skipping')
                continue

        # Build and merge the Landsat collections
        model_obj = ssebop.Collection(
            collections=collections,
            start_date=export_dt.strftime('%Y-%m-%d'),
            end_date=(export_dt +
                      datetime.timedelta(days=1)).strftime('%Y-%m-%d'),
            cloud_cover_max=float(ini['INPUTS']['cloud_cover']),
            geometry=tmax_mask.geometry(),
            # model_args=model_args,
            # filter_args=filter_args,
        )
        landsat_coll = model_obj.overpass(variables=['ndvi'])
        # wrs2_tiles_all = model_obj.get_image_ids()
        # pprint.pprint(landsat_coll.aggregate_array('system:id').getInfo())
        # input('ENTER')

        logging.debug('  Getting available WRS2 tile list')
        landsat_id_list = landsat_coll.aggregate_array('system:id').getInfo()
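        # e.g. an ID of the assumed form
        #   'LANDSAT/LC08/C01/T1_TOA/LC08_043033_20170716'
        # yields '043033' (the WRS2 path/row) via split('_')[-2]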
        wrs2_tiles_all = {img_id.split('_')[-2] for img_id in landsat_id_list}
        if not wrs2_tiles_all:
            logging.info('  No available images - skipping')
            continue

        # If overwriting, start a new export no matter what
        # The default is to not overwrite, so this mode will not be used often
        if not overwrite_flag:
            # Check if there are any previous images for this date
            # If so, only build a new Tcorr image if there are new wrs2_tiles
            #   that were not used in the previous image.
            # Should this code only be run in cron mode or is this the expected
            #   operation when (re)running for any date range?
            # Should we only test the last image
            # or all previous images for the date?
            logging.debug(
                '  Checking for previous exports/versions of daily image')
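            # limit(1, 'date_ingested', False) sorts descending on
            # 'date_ingested' and keeps only the most recently exported image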
            tcorr_daily_coll = ee.ImageCollection(tcorr_daily_coll_id)\
                .filterDate(export_date, next_date)\
                .limit(1, 'date_ingested', False)
            tcorr_daily_info = tcorr_daily_coll.getInfo()

            if tcorr_daily_info['features']:
                # Assume we won't be building a new image and only set flag
                #   to True if the WRS2 tile lists are different
                export_flag = False

                # The ".limit(1, ..." on the tcorr_daily_coll above makes this
                # for loop and break statement unnecessary, but leaving for now
                for tcorr_img in tcorr_daily_info['features']:
                    # If the full WRS2 list is not present, rebuild the image
                    # This should only happen for much older Tcorr images
                    if 'wrs2_available' not in tcorr_img['properties']:
                        logging.debug(
                            '    "wrs2_available" property not present in '
                            'previous export')
                        export_flag = True
                        break

                    wrs2_tiles_old = set(
                        tcorr_img['properties']['wrs2_available'].split(','))

                    if wrs2_tiles_all != wrs2_tiles_old:
                        logging.debug('  Tile Lists')
                        logging.debug('  Previous: {}'.format(', '.join(
                            sorted(wrs2_tiles_old))))
                        logging.debug('  Available: {}'.format(', '.join(
                            sorted(wrs2_tiles_all))))
                        logging.debug('  New: {}'.format(', '.join(
                            sorted(
                                wrs2_tiles_all.difference(wrs2_tiles_old)))))
                        logging.debug('  Dropped: {}'.format(', '.join(
                            sorted(
                                wrs2_tiles_old.difference(wrs2_tiles_all)))))

                        export_flag = True
                        break

                if not export_flag:
                    logging.debug('  No new WRS2 tiles/images - skipping')
                    continue
                # else:
                #     logging.debug('    Building new version')
            else:
                logging.debug('    No previous exports')

        def tcorr_img_func(image):
            t_stats = ssebop.Image.from_landsat_c1_toa(
                    ee.Image(image),
                    tdiff_threshold=float(ini[model_name]['tdiff_threshold'])) \
                .tcorr_stats
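            # combine(..., overwrite=False) only fills in these defaults when
            # 'tcorr_p5' / 'tcorr_count' are missing (e.g. fully masked scenes)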
            t_stats = ee.Dictionary(t_stats) \
                .combine({'tcorr_p5': 0, 'tcorr_count': 0},
                         overwrite=False)
            tcorr = ee.Number(t_stats.get('tcorr_p5'))
            count = ee.Number(t_stats.get('tcorr_count'))

            # Remove the merged collection indices from the system:index
            scene_id = ee.List(
                ee.String(image.get('system:index')).split('_')).slice(-3)
            scene_id = ee.String(scene_id.get(0)).cat('_') \
                .cat(ee.String(scene_id.get(1))).cat('_') \
                .cat(ee.String(scene_id.get(2)))
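            # e.g. a merged index of the assumed form '1_2_LC08_043033_20170716'
            # reduces to the bare scene ID 'LC08_043033_20170716'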

            return tmax_mask.add(tcorr) \
                .rename(['tcorr']) \
                .clip(image.geometry()) \
                .set({
                    'system:time_start': image.get('system:time_start'),
                    'scene_id': scene_id,
                    'wrs2_tile': scene_id.slice(5, 11),
                    'spacecraft_id': image.get('SPACECRAFT_ID'),
                    'tcorr': tcorr,
                    'count': count,
                })

        # Test for one image
        # pprint.pprint(tcorr_img_func(ee.Image(landsat_coll \
        #     .filterMetadata('WRS_PATH', 'equals', 36) \
        #     .filterMetadata('WRS_ROW', 'equals', 33).first())).getInfo())
        # input('ENTER')

        # (Re)build the Landsat collection from the image IDs
        landsat_coll = ee.ImageCollection(landsat_id_list)
        tcorr_img_coll = ee.ImageCollection(landsat_coll.map(tcorr_img_func)) \
            .filterMetadata('count', 'not_less_than',
                            float(ini['TCORR']['min_pixel_count']))

        # If there are no Tcorr values, return an empty image
        tcorr_img = ee.Algorithms.If(tcorr_img_coll.size().gt(0),
                                     tcorr_img_coll.median(),
                                     tmax_mask.updateMask(0))
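        # ee.Algorithms.If returns a generic ComputedObject, so the result is
        # recast with ee.Image() below before renaming and setting properties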

        def unique_properties(coll, prop):
            return ee.String(
                ee.List(
                    ee.Dictionary(
                        coll.aggregate_histogram(prop)).keys()).join(','))
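        # (aggregate_histogram returns {value: count}; the helper joins its
        # keys, i.e. the distinct property values, into a CSV string)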

        wrs2_tile_list = ee.String('').cat(
            unique_properties(tcorr_img_coll, 'wrs2_tile'))
        landsat_list = ee.String('').cat(
            unique_properties(tcorr_img_coll, 'spacecraft_id'))

        # Cast to float and set properties
        tcorr_img = ee.Image(tcorr_img).rename(['tcorr']).double() \
            .set({
                'system:time_start': utils.millis(export_dt),
                'date_ingested': datetime.datetime.today().strftime('%Y-%m-%d'),
                'date': export_dt.strftime('%Y-%m-%d'),
                'year': int(export_dt.year),
                'month': int(export_dt.month),
                'day': int(export_dt.day),
                'doy': int(export_dt.strftime('%j')),
                'cycle_day': ((export_dt - cycle_base_dt).days % 8) + 1,
                'landsat': landsat_list,
                'model_name': model_name,
                'model_version': ssebop.__version__,
                'tmax_source': tmax_source.upper(),
                'tmax_version': tmax_version.upper(),
                'wrs2_tiles': wrs2_tile_list,
                'wrs2_available': ','.join(sorted(wrs2_tiles_all)),
            })

        # Build export tasks
        if ini['EXPORT']['export_dest'] == 'ASSET':
            logging.debug('  Building export task')
            task = ee.batch.Export.image.toAsset(
                image=ee.Image(tcorr_img),
                description=export_id,
                assetId=asset_id,
                crs=export_crs,
                crsTransform='[' + ','.join(list(map(str, export_geo))) + ']',
                dimensions='{0}x{1}'.format(*export_shape),
            )
            logging.info('  Starting export task')
            utils.ee_task_start(task)

        # Pause before starting next task
        utils.delay_task(delay)
        logging.debug('')
Esempio n. 22
0
    def increment(self):
        self.index += 1
        if self.index >= LED_COUNT:
            self.index = 0
        self.lastUpdate = millis()
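
# Usage sketch for the pattern above (assumed names: an animation object
# 'anim' with this increment() method and a millisecond threshold
# 'interval_ms'; neither is from the source):
#     if millis() - anim.lastUpdate >= interval_ms:
#         anim.increment()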