Example #1
def dateToDecYear(date):
	"""Convert :class:`datetime.datetime` object to decimal year
	
	**Args**: 
		* **date** (datetime.datetime): date and time
	**Returns**:
		* **dyear** (float): decimal year
			
	written by Sebastien, 2013-02
	"""
	from datetime import datetime as dt
	import time
	
	# returns seconds since epoch
	sE = lambda date: time.mktime(date.timetuple())

	year = date.year
	startOfThisYear = dt(year=year, month=1, day=1)
	startOfNextYear = dt(year=year+1, month=1, day=1)

	yearElapsed = sE(date) - sE(startOfThisYear)
	yearDuration = sE(startOfNextYear) - sE(startOfThisYear)
	fraction = yearElapsed/yearDuration

	return date.year + fraction
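
A minimal usage sketch, assuming dateToDecYear from the example above is in scope; the exact fraction depends on the local timezone used by time.mktime:

from datetime import datetime as dt

# Noon on 2 July 2013 is roughly the midpoint of a non-leap year.
print(dateToDecYear(dt(2013, 7, 2, 12)))   # approximately 2013.5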
Example #2
def test_should_raise_exceptions_if_no_libraries_are_found_in_the_date_range_when_reading_data(toplevel_tickstore):
    toplevel_tickstore._collection.insert_one(
        {'start': dt(2010, 1, 1),
         'end': dt(2010, 12, 31, 23, 59, 59),
         'library_name': 'FEED_2010.LEVEL1'})
    with pytest.raises(NoDataFoundException) as e:
        toplevel_tickstore.read('blah', DateRange(start=dt(2012, 1, 1), end=dt(2012, 3, 1)))
    assert "No underlying libraries exist for the given date range" in str(e)
Example #3
    def test_system_units_conform_to(self):
        ocgis.env.OVERWRITE = True
        kwds = {'time_range': [dt(2011, 1, 1), dt(2011, 12, 31, 23, 59, 59)]}
        ds = [self.test_data.get_rd('cancm4_tasmax_2011', kwds=kwds), self.test_data.get_rd('cancm4_rhsmax', kwds=kwds)]

        # Set the conform to units
        ds[0].conform_units_to = 'fahrenheit'
        ds[1].conform_units_to = 'percent'

        calc = [{'func': 'heat_index', 'name': 'heat_index', 'kwds': {'tas': 'tasmax', 'rhs': 'rhsmax'}}]
        select_ugid = [25]

        # Operations on entire data arrays
        ops = ocgis.OcgOperations(dataset=ds, calc=calc)
        self.assertEqual(ops.calc_grouping, None)
        ret = ops.execute()
        ref = ret.get_element()
        hiv = ref['heat_index']
        hi = hiv.get_value()
        self.assertEqual(hi.shape, (365, 64, 128))

        self.assertEqual(hiv.units, None)
        self.assertTrue(hiv.get_mask().any())

        # Try temporal grouping
        ops = ocgis.OcgOperations(dataset=ds, calc=calc, calc_grouping=['month'], geom='state_boundaries',
                                  select_ugid=select_ugid)
        ret = ops.execute()
        actual = ret.get_element()['heat_index'].shape
        self.assertEqual(actual, (12, 4, 4))
Example #4
 def test_HeatIndex(self):
     ds = [self.tasmax,self.rhsmax]
     calc = [{'func':'heat_index','name':'heat_index','kwds':{'tas':'tasmax','rhs':'rhsmax','units':'k'}}]
     
     time_range = [dt(2011,1,1),dt(2011,12,31,23,59,59)]
     for d in ds: d['time_range'] = time_range
     ops = OcgOperations(dataset=ds,calc=calc)
     self.assertEqual(ops.calc_grouping,None)
     ret = ops.execute()
     ref = ret[1]
     self.assertEqual(ref.variables.keys(),['tasmax','rhsmax','heat_index'])
     hi = ref.variables['heat_index']
     self.assertEqual(hi.value.shape,(365,1,64,128))
     it = MeltedIterator(ret[1],mode='calc')
     for ii,row in enumerate(it.iter_rows()):
         if ii == 0:
             self.assertEqual(row['value'],None)
         if ii < 1000:
             for key in ['vid','var_name','did','uri']:
                 self.assertEqual(row[key],None)
         else:
             break
     
     ops = OcgOperations(dataset=ds,calc=calc,output_format='numpy',snippet=True)
     ret = ops.execute()
Example #5
def test_should_raise_exception_if_date_range_for_library_overlaps_with_existing_libraries(toplevel_tickstore, arctic):
    toplevel_tickstore._collection.insert_one({'library_name': 'FEED_2010.LEVEL1', 'start': dt(2010, 1, 1), 'end': dt(2010, 6, 30)})
    arctic.initialize_library('FEED_2010a.LEVEL1', tickstore.TICK_STORE_TYPE)
    with pytest.raises(OverlappingDataException) as e:
        toplevel_tickstore.add(DateRange(start=dt(2010, 6, 1), end=dt(2010, 12, 31, 23, 59, 59, 999000)), 'FEED_2010a.LEVEL1')
        assert toplevel_tickstore._collection.find_one({'library_name': 'FEED_2010.LEVEL1'})
    assert "There are libraries that overlap with the date range:" in str(e)
Example #6
    def test_seasonal_get_grouping_unique_flag(self):
        ## test with year flag
        dates = get_date_list(dt(2012,1,1),dt(2013,12,31),1)
        td = TemporalDimension(value=dates)
        calc_grouping = [[6,7,8],'unique']
        tg = td.get_grouping(calc_grouping)

        time_region = {'year':[2012],'month':[6,7,8]}
        sub1,idx1 = td.get_time_region(time_region,return_indices=True)
        time_region = {'year':[2013],'month':[6,7,8]}
        sub2,idx2 = td.get_time_region(time_region,return_indices=True)
        base_select = np.zeros(td.shape[0],dtype=bool)
        dgroups = deque()
        
        for software,manual in itertools.izip(tg.dgroups,dgroups):
            self.assertNumpyAll(software,manual)
        self.assertEqual(len(tg.dgroups),2)
        self.assertEqual(tg.value.tolist(),[datetime.datetime(2012, 7, 17, 0, 0), datetime.datetime(2013, 7, 17, 0, 0)])
        self.assertEqual(tg.bounds.tolist(),[[datetime.datetime(2012, 6, 1, 0, 0), datetime.datetime(2012, 8, 31, 0, 0)], [datetime.datetime(2013, 6, 1, 0, 0), datetime.datetime(2013, 8, 31, 0, 0)]])
        
        dgroup1 = base_select.copy()
        dgroup1[idx1] = True
        dgroup2 = base_select.copy()
        dgroup2[idx2] = True
        
        dgroups.append(dgroup1)
        dgroups.append(dgroup2)
        
        tg = td.get_grouping([[6,7,8],'year'])
        for ii in range(len(tg.dgroups)):
            self.assertNumpyAll(tg.dgroups[ii],dgroups[ii])
        self.assertEqual(len(tg.dgroups),len(dgroups))
Example #7
 def test_constructor_by_temporal_dimension(self):
     value = [dt(2012,1,1),dt(2012,1,2)]
     td = TemporalDimension(value=value)
     tgd = td.get_grouping(['month'])
     self.assertEqual(tuple(tgd.date_parts[0]),(None,1,None,None,None,None))
     self.assertTrue(tgd.dgroups[0].all())
     self.assertNumpyAll(tgd.uid,np.array([1]))
Example #8
def test_date_range_default_timezone(tickstore_lib, tz_name):
    """
    We assume naive datetimes are user-local
    """
    DUMMY_DATA = [
                  {'a': 1.,
                   'b': 2.,
                   'index': dt(2013, 1, 1, tzinfo=mktz(tz_name))
                   },
                  # Half-way through the year
                  {'b': 3.,
                   'c': 4.,
                   'index': dt(2013, 7, 1, tzinfo=mktz(tz_name))
                   },
                  ]

    with patch('arctic.date._mktz.DEFAULT_TIME_ZONE_NAME', tz_name):
        tickstore_lib._chunk_size = 1
        tickstore_lib.write('SYM', DUMMY_DATA)
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130701), columns=None)
        assert len(df) == 2
        assert df.index[1] == dt(2013, 7, 1, tzinfo=mktz(tz_name))
        assert df.index.tz == mktz(tz_name)

        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130101), columns=None)
        assert len(df) == 1

        df = tickstore_lib.read('SYM', date_range=DateRange(20130701, 20130701), columns=None)
        assert len(df) == 1
Example #9
    def _append_segment(self, d, segment_start, inferred=False):

        try:
            index = self.segment_starts.index(t(d.hour, d.minute, d.second))
        except ValueError:
            index = self.segment_starts.index(segment_start) - 1

        try:
            current_segment = self.segment_starts[index]
        except IndexError:
            current_segment = self.segment_starts[len(self.segment_starts) - 1]

        segment = {                
                    'type': 'basal-rate-segment',
                    'delivered': self.schedule[current_segment],
                    'value': self.schedule[current_segment],
                    'deliveryType': 'scheduled',
                    'inferred': inferred,
                    'start': dt(d.year, d.month, d.day, d.hour, d.minute, d.second),
                    'end': dt(d.year, d.month, d.day, segment_start.hour, segment_start.minute, segment_start.second)
                }

        if segment_start == t(0,0,0):
            segment['end'] = segment['end'] + td(days=1)

        # print('Basal segment start', segment['start'].isoformat())
        # print('Basal segment end', segment['end'].isoformat())
        # print('Basal segment rate', segment['delivered'])
        # print()

        self.segments.append(segment)
Example #10
    def test_factor_neutralizer(self):
        index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28'], ['001', '002', '003', '004']],
                                           names=['trade_date', 'ticker'])
        data1 = pd.DataFrame(index=index, data=[1.0, 1.0, 1.2, 2.0, 0.9, 5.0, 5.0, 5.1])
        factor_test1 = Factor(data=data1, name='test1',
                              property_dict={'norm_type': FactorNormType.Industry_Cap_Neutral})

        data2 = pd.DataFrame(index=index, data=[2.6, 2.5, 2.8, 2.9, 2.7, 1.9, 5.0, 2.1])
        factor_test2 = Factor(data=data2, name='test2', property_dict={'type': FactorType.ALPHA_FACTOR_MV,
                                                                       'norm_type': FactorNormType.Industry_Neutral})

        data3 = pd.DataFrame(index=index, data=['a', 'b', 'a', 'a', 'a', 'b', 'c', 'b'])
        factor_test3 = Factor(data=data3, name='test3', property_dict={'type': FactorType.INDUSTY_CODE})

        data4 = pd.DataFrame(index=index, data=[1.0, 1.0, 1.2, 2.0, 0.9, 5.0, 5.0, 5.1])
        factor_test4 = Factor(data=data4, name='test4', property_dict={'norm_type': FactorNormType.Industry_Neutral})

        fc = FactorContainer('2014-01-30', '2014-02-28', [factor_test1, factor_test2, factor_test3, factor_test4])

        calculated = FactorNeutralizer().fit_transform(fc)
        index = pd.MultiIndex.from_product([[dt(2014, 1, 30), dt(2014, 2, 28)], ['001', '002', '003', '004']],
                                           names=['trade_date', 'ticker'])
        expected = pd.DataFrame({'test1': [0.0983574180639, 8.881784197e-16, -0.306074564019, 0.207717145955,
                                           -2.10942374679e-15, 8.881784197e-16, -5.3290705182e-15, 0.0],
                                 'test2': [-0.166666666667, 0.0, 0.0333333333333, 0.133333333333, 0.0, -0.1, 0.0, 0.1],
                                 'test3': ['a', 'b', 'a', 'a', 'a', 'b', 'c', 'b'],
                                 'test4': [-0.4, 0.0, -0.2, 0.6, 0.0, -0.05, 0.0, 0.05]},
                                index=index,
                                dtype=object)
        assert_frame_equal(calculated, expected)

        calculated = FactorNeutralizer(out_container=True).fit_transform(fc)
        assert_frame_equal(calculated.data, expected)
        self.assertEqual(calculated.container_property, fc.container_property)
Example #11
    def test_selector(self):
        index_weight = pd.MultiIndex.from_product([[dt(2014, 1, 30), dt(2014, 2, 28)], ['a', 'b', 'other']],
                                                  names=INDEX_INDUSTRY_WEIGHT.full_index)
        industry_weight = pd.DataFrame([0.5, 0.4, 0.1, 0.5, 0.3, 0.2], index=index_weight)

        index = pd.MultiIndex.from_product([['2014-01-30', '2014-02-28'], ['001', '002', '003', '004', '005']],
                                           names=INDEX_FACTOR.full_index)
        X = pd.DataFrame({'score': [2, 3, 3, 8, 4, 5, 9, 11, 2, 0],
                          'industry_code': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'other', 'b', 'b']},
                         index=index)

        score = Factor(data=X['score'], name='score', property_dict={'type': FactorType.SCORE})
        industry_code = Factor(data=X['industry_code'], name='industry_code',
                               property_dict={'type': FactorType.INDUSTY_CODE})
        fc = FactorContainer(start_date='2014-01-30', end_date='2014-02-28')
        fc.add_factor(score)
        fc.add_factor(industry_code)

        calculated = Selector(industry_weight=industry_weight,
                              method=SelectionMethod.INDUSTRY_NEUTRAL).fit(fc).predict(fc)

        index_exp = pd.MultiIndex.from_arrays(
            [[dt(2014, 1, 30), dt(2014, 1, 30), dt(2014, 1, 30), dt(2014, 1, 30), dt(2014, 2, 28), dt(2014, 2, 28),
              dt(2014, 2, 28), dt(2014, 2, 28), dt(2014, 2, 28)],
             ['002', '003', '004', '005', '002', '001', '004', '005', '003']], names=['trade_date', 'ticker'])
        expected = pd.DataFrame({'score': [3, 3, 8, 4, 9, 5, 2, 0, 11],
                                 'industry_code': ['a', 'a', 'b', 'b', 'a', 'a', 'b', 'b', 'other'],
                                 'weight': [0.25, 0.25, 0.2, 0.2, 0.25, 0.25, 0.15, 0.15, 0.2]},
                                index=index_exp, dtype=object)
        expected = expected[['score', 'industry_code', 'weight']]
        assert_frame_equal(calculated, expected)
Example #12
    def test_replace_exact_row_loss_allowed(self):
        active = R(dt(2000, 1, 1), dt(2001, 1, 1))

        self.session.add(Model(key='a', value=1, period=active))

        imported = [
            dict(key='a', value=2, source=''),
        ]

        class TestDiff(TemporalDiff):
            model = Model
            extract_imported = MultiKeyDictExtractor('key', 'source')
            replace = True

        diff = TestDiff(self.session, imported, dt(2000, 1, 1))

        diff.apply()

        expected = [
            dict(key='a', value=2, period=active),
        ]

        actual = [dict(key=o.key, value=o.value, period=o.period)
                  for o in self.session.query(Model).order_by('key', 'period')]

        compare(expected, actual)
Example #13
    def test_load(self):
        ref_test = self.test_data['cancm4_tas']
        uri = self.test_data.get_uri('cancm4_tas')
        rd = RequestDataset(variable=ref_test['variable'],uri=uri)
        field = rd.get()
        ds = nc.Dataset(uri,'r')

        self.assertEqual(field.level,None)
        self.assertEqual(field.spatial.crs,WGS84())

        tv = field.temporal.value
        test_tv = ds.variables['time'][:]
        self.assertNumpyAll(tv,test_tv)
        self.assertNumpyAll(field.temporal.bounds,ds.variables['time_bnds'][:])

        tdt = field.temporal.value_datetime
        self.assertEqual(tdt[4],dt(2001,1,5,12))
        self.assertNumpyAll(field.temporal.bounds_datetime[1001],np.array([dt(2003,9,29),dt(2003,9,30)]))

        rv = field.temporal.value_datetime[100]
        rb = field.temporal.bounds_datetime[100]
        self.assertTrue(all([rv > rb[0],rv < rb[1]]))

        self.assertEqual(field.temporal.extent_datetime,(datetime.datetime(2001,1,1),datetime.datetime(2011,1,1)))

        ds.close()
Example #14
def show_max_cleaning():
    week = 264
    dataset = bc.Dataset(period=dt(days=7), step_length=dt(days=6))
    period = dataset.get_period(week)
    smoother = cln.BSplineSmoother(period, smoothness=3)
    cleaner = cln.RegressionCleaner(smoother, zscore=0.67)
    (clean_data, outliers) = cleaner.get_cleaned_data(
        cln.RegressionCleaner.replace_with_estimate)
    plt.figure()
    plt.hold(True)
    n = len(smoother.dataset)
    knots = smoother.knots
    t = np.linspace(knots[0], knots[-1], n * 25)
    y = smoother.splev(t)
    plt.hold(True)
    plt.plot(t, y)
    x = np.linspace(knots[0], knots[-1], n)
    plt.plot(x, smoother.dataset, 'mx')
    (lower, upper) = cleaner.get_confidence_interval()

    plt.plot(lower, 'g-')
    plt.plot(upper, 'g-')
    if len(outliers) > 0:        
        print "Drawing %d outliers." % len(outliers)
        plt.plot(outliers, clean_data[outliers], 'r*', label="Cleaned data")
    else:
        print "No outliers!"
Example #15
def test_figure_json_encoding():
    df = pd.DataFrame(columns=['col 1'], data=[1, 2, 3])
    s1 = Scatter3d(x=numeric_list, y=np_list, z=mixed_list)
    s2 = Scatter(x=df['col 1'])
    data = Data([s1, s2])
    figure = Figure(data=data)

    js1 = json.dumps(s1, cls=utils.PlotlyJSONEncoder, sort_keys=True)
    js2 = json.dumps(s2, cls=utils.PlotlyJSONEncoder, sort_keys=True)

    assert(js1 == '{"type": "scatter3d", "x": [1, 2, 3], '
                  '"y": [1, 2, 3, null, null, null, "2014-01-05"], '
                  '"z": [1, "A", "2014-01-05", '
                  '"2014-01-05 01:01:01", "2014-01-05 01:01:01.000001"]}')
    assert(js2 == '{"type": "scatter", "x": [1, 2, 3]}')

    # Test JSON encoding works
    json.dumps(data, cls=utils.PlotlyJSONEncoder, sort_keys=True)
    json.dumps(figure, cls=utils.PlotlyJSONEncoder, sort_keys=True)

    # Test data wasn't mutated
    assert(bool(np.asarray(np_list ==
                np.array([1, 2, 3, np.NaN,
                          np.NAN, np.Inf, dt(2014, 1, 5)])).all()))
    assert(set(data[0]['z']) ==
           set([1, 'A', dt(2014, 1, 5), dt(2014, 1, 5, 1, 1, 1),
                dt(2014, 1, 5, 1, 1, 1, 1)]))
Example #16
    def criteria_for_duplication(self,ref_data,dup,logger,exact_match=True):
        score = {'similarity':0,'selected':0}
        try:
            dup_dt=dt(1900,1,1)
            result_dt=dt(1901,1,1)
            try:
                dup_dt=dup['extras']['date_updated']
            except KeyError:
                dup_dt=dup['extras']['date_released']

            try:
                result_dt=ref_data['extras']['date_updated']
            except KeyError:
                result_dt=ref_data['extras']['date_released']

            if dup_dt < result_dt:
                score['similarity'] = 0.1
                score['selected'] = 1
            elif dup_dt > result_dt:
                score['similarity'] = 0.1
                score['selected'] = -1
            elif dup_dt == result_dt:
                score['similarity'] = 0.4
        except TypeError:
            score['similarity']=0.2
            logging.getLogger('log_err').error('TypeError: ObjectId("%s"): at least one arg must be a datetime.date' %
                (dup['_id']))
        except KeyError:
            score['similarity']=0.2

        dup_list_resources=[]
        result_list_resources=[]
        dup_list_res=[]
        result_list_res=[]
        dup_list_filenames=[]
        result_list_filenames=[]
        if 'resources' in dup.keys() or 'resources' in ref_data.keys():
            if 'resources' in dup.keys():
                for res in dup['resources']:
                    try:
                        if 'url' in res and res['url'] not in ['',None]:
                            dup_list_resources.append(res['url'].lower())
                        if 'size' in res and res['size'] not in ['',None]:
                            dup_list_res.append({'size':res['size'],'format':res['format']})
                        if 'file_name' in res and res['file_name'] not in ['',None]:
                            dup_list_filenames.append(res['file_name'].lower())
                    except KeyError as e:
                        print('%s for dup: ObjectId("%s")' % (e,dup['_id']))

            if 'resources' in ref_data.keys():
                for res in ref_data['resources']:
                    try:
                        if 'url' in res and res['url'] not in ['',None]:
                            result_list_resources.append(res['url'].lower())
                        if 'size' in res and res['size'] not in ['',None]:
                            result_list_res.append({'size':res['size'],'format':res['format']})
                        if 'file_name' in res and res['file_name'] not in ['',None]:
                            result_list_filenames.append(res['file_name'].lower())
                    except KeyError as e:
                        print('%s for ref_data: ObjectId("%s")' % (e,ref_data['_id']))
Example #17
def readIcalFile(events, ical_file, start_dt, end_dt):
    try:
        with open(ical_file, "r") as fh:
            ical_str = fh.read()
            fh.close()
    except IOError as e:
        print("iCal file open error")
        return

    cal = icalendar.Calendar.from_ical(ical_str)

    for e in cal.walk():
        if e.name == 'VEVENT':
            start = e.decoded("dtstart").strftime('%Y,%m,%d').split(',')
            dtstart = dt(int(start[0]), int(start[1]), int(start[2]))
            end = e.decoded("dtend").strftime('%Y,%m,%d').split(',')
            dtend = dt(int(end[0]), int(end[1]), int(end[2]))
            if (start_dt <= dtstart and start_dt <= dtend and dtend < end_dt):
                dict_schedule = {
                    "title": e.decoded("summary").decode('utf-8') if
                    e.get("summary") else "",
                    "place": e.decoded("location").decode('utf-8') if
                    e.get("location") else "",
                    "desc": e.decoded("description").decode('utf-8') if
                    e.get("description") else "",
                    "rrule": e.decoded("rrule") if e.get("rrule") else "",
                    "start": e.decoded("dtstart"),
                    "end": e.decoded("dtend"),
                }
                events.append(dict_schedule)
    return events
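
A minimal usage sketch for readIcalFile above; the file name here is hypothetical and the icalendar package must be installed:

from datetime import datetime as dt

events = []
# Collect events from 2023 whose start and end both fall inside the window.
readIcalFile(events, "calendar.ics", dt(2023, 1, 1), dt(2024, 1, 1))
for ev in events:
    print(ev["title"], ev["start"], ev["end"])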
Example #18
    def test_remove_after(self):
        _, agent_id = self._insert_agent_to_stat_agent()
        stats = {
            dt(2012, 1, 1): {
                agent_id: {
                    'login_time': timedelta(minutes=15),
                    'pause_time': timedelta(minutes=13)
                },
            },
            dt(2012, 1, 2): {
                agent_id: {
                    'login_time': timedelta(minutes=20),
                    'pause_time': timedelta(minutes=13)
                },
            },
            dt(2012, 1, 3): {
                agent_id: {
                    'login_time': timedelta(minutes=25),
                    'pause_time': timedelta(minutes=13)
                },
            },
        }

        self.session.begin()
        for period_start, agents_stats in stats.iteritems():
            stat_agent_periodic_dao.insert_stats(self.session, agents_stats, period_start)
        self.session.commit()

        stat_agent_periodic_dao.remove_after(self.session, dt(2012, 1, 2))

        res = self.session.query(StatAgentPeriodic.time)

        self.assertEqual(res.count(), 1)
        self.assertEqual(res[0].time, dt(2012, 1, 1))
Example #19
    def test_find_all_in_period_not_found(self):
        expected_result = []
        start, end = dt(2013, 1, 1), dt(2013, 2, 1)

        result = call_log_dao.find_all_in_period(start, end)

        assert_that(result, equal_to(expected_result))
Example #20
def test_tickstore_to_bucket_always_forwards_image():
    symbol = 'SYM'
    tz = 'UTC'
    initial_image = {'index': dt(2014, 2, 1, 0, 0, tzinfo=mktz(tz)), 'A': 123, 'B': 54.4, 'C': 'DESC'}
    data = [{'index': dt(2014, 1, 1, 0, 1, tzinfo=mktz(tz)), 'A': 124, 'D': 0}]
    with pytest.raises(UnorderedDataException) as e:
        TickStore._to_bucket(data, symbol, initial_image)
Example #21
def test_tickstore_to_bucket_with_image():
    symbol = 'SYM'
    tz = 'UTC'
    initial_image = {'index': dt(2014, 1, 1, 0, 0, tzinfo=mktz(tz)), 'A': 123, 'B': 54.4, 'C': 'DESC'}
    data = [{'index': dt(2014, 1, 1, 0, 1, tzinfo=mktz(tz)), 'A': 124, 'D': 0},
            {'index': dt(2014, 1, 1, 0, 2, tzinfo=mktz(tz)), 'A': 125, 'B': 27.2}]
    bucket, final_image = TickStore._to_bucket(data, symbol, initial_image)
    assert bucket[COUNT] == 2
    assert bucket[END] == dt(2014, 1, 1, 0, 2, tzinfo=mktz(tz))
    assert set(bucket[COLUMNS]) == set(('A', 'B', 'D'))
    assert set(bucket[COLUMNS]['A']) == set((ROWMASK, DTYPE, DATA))
    assert get_coldata(bucket[COLUMNS]['A']) == ([124, 125], [1, 1, 0, 0, 0, 0, 0, 0])
    assert get_coldata(bucket[COLUMNS]['B']) == ([27.2], [0, 1, 0, 0, 0, 0, 0, 0])
    assert get_coldata(bucket[COLUMNS]['D']) == ([0], [1, 0, 0, 0, 0, 0, 0, 0])
    index = [dt.fromtimestamp(int(i/1000)).replace(tzinfo=mktz(tz)) for i in
             list(np.cumsum(np.frombuffer(decompress(bucket[INDEX]), dtype='uint64')))]
    assert index == [i['index'] for i in data]
    assert bucket[COLUMNS]['A'][DTYPE] == 'int64'
    assert bucket[COLUMNS]['B'][DTYPE] == 'float64'
    assert bucket[SYMBOL] == symbol
    assert bucket[START] == initial_image['index']
    assert bucket[IMAGE_DOC][IMAGE] == initial_image
    assert bucket[IMAGE_DOC] == {IMAGE: initial_image,
                                 IMAGE_TIME: initial_image['index']}
    assert final_image == {'index': data[-1]['index'], 'A': 125, 'B': 27.2, 'C': 'DESC', 'D': 0}
Example #22
def decimalYear(date):
    """
    Calculates the decimal representation of a date, e.g., 2013.12.
    
    The code uses Python's datetime package to determine the fractional
    year. Thus, leap years are taken into account. There may still be
    issues with time zones or daylight saving time, etc.
    
    Code from:
    http://stackoverflow.com/questions/6451655/python-how-to-convert-datetime-dates-to-decimal-years

    Parameters
    ----------
    date : python date instance
        The input date (and time).
      
    Returns
    -------
    Decimal year : float
        Decimal representation of the date.
    """
    def s(date):
      # returns seconds elapsed since a fixed reference date (1900-01-01)
      return (date - dt(1900,1,1)).total_seconds()

    year = date.year
    startOfThisYear = dt(year=year, month=1, day=1)
    startOfNextYear = dt(year=year+1, month=1, day=1)

    yearElapsed = s(date) - s(startOfThisYear)
    yearDuration = s(startOfNextYear) - s(startOfThisYear)
    fraction = yearElapsed/yearDuration

    return date.year + fraction
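
A short check of decimalYear above, assuming the function is in scope; noon on 2 July sits near the middle of a non-leap year:

from datetime import datetime as dt

print(decimalYear(dt(2013, 1, 1)))       # 2013.0
print(decimalYear(dt(2013, 7, 2, 12)))   # approximately 2013.5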
Example #23
def test_daterange_lt():
    dr = DateRange(dt(2013, 1, 1))
    dr2 = DateRange(dt(2001, 1, 1))

    assert dr2 < dr
    dr.start = None
    assert (dr2 < dr) == False
Example #24
def test_multi_index_update(bitemporal_library):
    sample_timerange = list(sorted(['2012-09-08 17:06:11.040', '2012-10-08 17:06:11.040', '2012-10-09 17:06:11.040', '2012-11-08 17:06:11.040'] * 2))
    ts = multi_index_df_from_arrs(
        index_headers=('index 1', 'index 2'),
        index_arrs=[
            sample_timerange,
            ['SPAM Index', 'EGG Index'] * 4
        ],
        data_dict={'near': [1.0, 1.1, 2.0, 2.1, 2.5, 2.6, 3.0, 3.1]}
    )

    ts2 = multi_index_df_from_arrs(
        index_headers=('index 1', 'index 2'),
        index_arrs=[
            ['2012-09-08 17:06:11.040', '2012-09-08 17:06:11.040', '2012-12-08 17:06:11.040'],
            ['SPAM Index', 'EGG Index', 'SPAM Index'],
        ],
        data_dict={'near': [1.2, 1.6, 4.0]}
    )

    expected_ts = multi_index_df_from_arrs(
        index_headers=('index 1', 'index 2'),
        index_arrs=[
            sample_timerange + ['2012-12-08 17:06:11.040'],
            ['EGG Index', 'SPAM Index'] * 4 + ['SPAM Index']
        ],
        data_dict={'near': [1.6, 1.2, 2.1, 2.0, 2.6, 2.5, 3.1, 3.0, 4.0]}
    )
    bitemporal_library.update('spam', ts, as_of=dt(2015, 1, 1))
    bitemporal_library.update('spam', ts2, as_of=dt(2015, 1, 2))
    assert_frame_equal(expected_ts, bitemporal_library.read('spam').data)
    assert bitemporal_library.read('spam').last_updated == dt(2015, 1, 2, tzinfo=LOCAL_TZ)
Example #25
 def test_units_conform_to(self):
     ocgis.env.OVERWRITE = True
     kwds = {'time_range':[dt(2011,1,1),dt(2011,12,31,23,59,59)]}
     ds = [self.test_data.get_rd('cancm4_tasmax_2011',kwds=kwds),self.test_data.get_rd('cancm4_rhsmax',kwds=kwds)]
     
     ## set the conform to units
     ds[0].conform_units_to = 'fahrenheit'
     ds[1].conform_units_to = 'percent'
     
     calc = [{'func':'heat_index','name':'heat_index','kwds':{'tas':'tasmax','rhs':'rhsmax'}}]
     select_ugid = [25]
     
     ## operations on entire data arrays
     ops = ocgis.OcgOperations(dataset=ds,calc=calc)
     self.assertEqual(ops.calc_grouping,None)
     ret = ops.execute()
     ref = ret[1]
     self.assertEqual(ref.keys(),['tasmax_rhsmax'])
     self.assertEqual(ref['tasmax_rhsmax'].variables.keys(),['heat_index'])
     hi = ref['tasmax_rhsmax'].variables['heat_index'].value
     self.assertEqual(hi.shape,(1,365,1,64,128))
     
     ## ensure the units are none
     self.assertEqual(ret[1]['tasmax_rhsmax'].variables['heat_index'].units,None)
     
     ## confirm no masked geometries
     self.assertFalse(ref['tasmax_rhsmax'].spatial.geom.point.value.mask.any())
     ## confirm some masked data in calculation output
     self.assertTrue(hi.mask.any())
             
     # try temporal grouping
     ops = ocgis.OcgOperations(dataset=ds,calc=calc,calc_grouping=['month'],geom='state_boundaries',select_ugid=select_ugid)
     ret = ops.execute()
     self.assertEqual(ret[25]['tasmax_rhsmax'].variables['heat_index'].value.shape,(1,12,1,5,4))
Example #26
def main():
    # Handle command-line arguments
    target_time = ""   # target time to process, given as an argument
    target_dir = ""
    try_times = 10    # number of trials
    argvs = sys.argv   # list of command-line arguments
    print(argvs)
    argc = len(argvs)  # number of arguments
    if argc > 3:
        target_time = argvs[1]      # 16 or 23
        target_dir = argvs[2]       # folder in which to save results
        try_times = int(argvs[3])   # number of learners to build (2000-3000 is probably good for RF)
    else:
        print("input: target-time, target-dir, try-times")
        exit()

    # Prepare to build the training (teacher) data
    terms = [(dt(2004, 2, 18), dt(2013, 9, 3)), (dt(2015, 6, 23), dt(2017, 9, 30))] # for aso
    #terms = [(dt(2010, 2, 18), dt(2013, 9, 3)), (dt(2015, 6, 23), dt(2016, 5, 1))] # for aso
    #terms = [(dt(2015, 6, 23), dt(2016, 2, 1))] # for aso
    #terms = [(dt(2015, 3, 1), dt(2016, 4, 1))] # for chichibu

    _save_flag = False # set to True to keep files for verification

    # Create the feature-generation object
    fg_obj = feature.feature_generator(target_time)
    tc = create_learning_data.teacher_creator(fg_obj, terms, "unkai_date.csv") # builds the training data in memory up front; this takes a while
    process(target_dir, tc, save_flag=_save_flag, try_times=try_times)
Example #27
def test_bitemporal_store_read_as_of_timezone(bitemporal_library):
    bitemporal_library.update('spam', ts1, as_of=dt(2015, 5, 1, tzinfo=mktz('Europe/London')))
    bitemporal_library.update('spam', read_str_as_pandas("""           sample_dt | near
                                                         2012-12-01 17:06:11.040 | 25"""),
                              as_of=dt(2015, 5, 2, tzinfo=mktz('Europe/London')))
    df = bitemporal_library.read('spam', as_of=dt(2015, 5, 2, tzinfo=mktz('Asia/Hong_Kong'))).data
    assert_frame_equal(df, ts1)
Example #28
def test_daterange_closedclosed_no_tz():
    date_range = DateRange(dt(2013, 1, 1), dt(2014, 2, 1), OPEN_OPEN)
    expected = DateRange(
        dt(2013, 1, 1, 0, 0, 0, 1000, tzinfo=mktz()), dt(2014, 1, 31, 23, 59, 59, 999000, tzinfo=mktz()), CLOSED_CLOSED
    )
    act = to_pandas_closed_closed(date_range)
    assert act == expected
Example #29
    def test_HeatIndex(self):
        kwds = {'time_range':[dt(2011,1,1),dt(2011,12,31,23,59,59)]}
        ds = [self.test_data.get_rd('cancm4_tasmax_2011',kwds=kwds),self.test_data.get_rd('cancm4_rhsmax',kwds=kwds)]
        calc = [{'func':'heat_index','name':'heat_index','kwds':{'tas':'tasmax','rhs':'rhsmax','units':'k'}}]
        
        ## operations on entire data arrays
        ops = OcgOperations(dataset=ds,calc=calc)
        self.assertEqual(ops.calc_grouping,None)
        ret = ops.execute()
        ref = ret[1]
        self.assertEqual(ref.variables.keys(),['tasmax','rhsmax'])
        self.assertEqual(ref.calc.keys(),['heat_index'])
        hi = ref.calc['heat_index']
        self.assertEqual(hi.shape,(365,1,64,128))
        
        ## confirm no masked geometries
        self.assertFalse(ref._archetype.spatial.vector.geom.mask.any())
        ## confirm some masked data in calculation output
        self.assertTrue(hi.mask.any())
        
        ## snippet-based testing
        ops = OcgOperations(dataset=ds,calc=calc,snippet=True)
        ret = ops.execute()
        self.assertEqual(ret[1].calc['heat_index'].shape,(1,1,64,128))
        ops = OcgOperations(dataset=ds,calc=calc,snippet=True,output_format='csv')
        ret = ops.execute()
        
#        subprocess.check_call(['loffice',ret])
        
        # try temporal grouping
        ops = OcgOperations(dataset=ds,calc=calc,calc_grouping=['month'])
        ret = ops.execute()
        self.assertEqual(ret[1].calc['heat_index'].shape,(12,1,64,128))
        ret = OcgOperations(dataset=ds,calc=calc,calc_grouping=['month'],
                            output_format='csv',snippet=True).execute()
Example #30
def test_date_range_default_timezone(tickstore_lib, tz_name):
    """
    We assume naive datetimes are user-local
    """
    DUMMY_DATA = [
                  {'a': 1.,
                   'b': 2.,
                   'index': dt(2013, 1, 1, tzinfo=mktz(tz_name))
                   },
                  # Half-way through the year
                  {'b': 3.,
                   'c': 4.,
                   'index': dt(2013, 7, 1, tzinfo=mktz(tz_name))
                   },
                  ]

    with patch('tzlocal.get_localzone', return_value=Mock(zone=tz_name)):
        tickstore_lib._chunk_size = 1
        tickstore_lib.write('SYM', DUMMY_DATA)
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130701), columns=None)

        assert df.index.tzinfo == mktz()

        assert len(df) == 2
        assert df.index[1] == dt(2013, 7, 1, tzinfo=mktz(tz_name))
        df = tickstore_lib.read('SYM', date_range=DateRange(20130101, 20130101), columns=None)
        assert len(df) == 1
        assert df.index.tzinfo == mktz()

        df = tickstore_lib.read('SYM', date_range=DateRange(20130701, 20130701), columns=None)
        assert len(df) == 1
        assert df.index.tzinfo == mktz()
Example #31
    table = fetched_dataframes[0]
    fname = "pm2p5_" + now.strftime('%Y_%m_%d_%H%M%S') + ".csv"
    table.to_csv(fname, encoding="utf-8-sig")


# Set the times at which to download; past times are included here
hours = []
#for i in range(24): # it is unclear when the JMA forecast gets updated...
#	hours.append(td(hours=i, minutes=20, seconds=0))
hours.append(td(hours=7, minutes=20, seconds=0))
hours.append(td(hours=0, minutes=20, seconds=0))

# Times of the next scheduled downloads; any past time is rolled forward to a future time
_next = []
for mem in hours:
    t = dt(dt.now().year, dt.now().month, dt.now().day, 0, 0, 0) + mem
    if t < dt.now():
        t += td(days=1)
    _next.append(t)
print(_next)

while True:
    for i in range(len(_next)):
        t = _next[i]
        now = dt.now()
        if t <= now:
            print("--try download--")
            print(str(now))
            try:
                download()
            except Exception as e:
Example #32
    def read_xls(filepath: str,
                 encoding: str='utf-8',
                 sheet_index: int=0,
                 skiprows: int=0,
                 skipcols: int=0,
                 chopcols: int=0,
                 date_formats: dict=None,
                 errors: str='coerce',
                 fill_date_errors: bool=False) -> pd.DataFrame:
        """Read an XLS file into a DataFrame.

        This function is designed to deal with *old* XLS files which
        the ``pandas.read_excel`` function does not support.

        Args:
            filepath (str): Full path to the file to be read.
            encoding (str, optional): Encoding used to read the XLS file.
                Defaults to 'utf-8'.
            sheet_index (int, optional): Index of the sheet to be read.
                Defaults to 0.
            skiprows (int, optional): Number of rows to skip (from the
                beginning of the file). Defaults to 0.
            skipcols (int, optional): Number of columns to skip (from the left).
                Defaults to 0.
            chopcols (int, optional): Number of columns to skip/chop (from the
               right). Defaults to 0.
            date_formats (dict, optional): Dictionary of
                ``{col_name: strftime_mask}``. Defaults to None.
            errors (str, optional): Method used by ``pandas.read_csv`` to
                resolve date parsing errors. Defaults to 'coerce'.
            fill_date_errors (bool, optional): Fill coerced NaT date errors
                with '1900-01-01'. Defaults to False.

        :Logic:
            The passed XLS file is opened and parsed by ``xlrd``,
            then read into an in-memory stream buffer, which is
            passed into ``pandas.read_csv`` for conversion to a
            DataFrame.

        Raises:
            ValueError: If the file extension is not 'xls'.
            IOError: If the workbook does not contain any rows of data.

        Returns:
            df (pd.DataFrame): A DataFrame containing the contents of
            the XLS file.

        """
        if os.path.splitext(filepath)[1].lower() != '.xls':
            raise ValueError('The file *must* be an XLS file.')
        chopcols = -chopcols if chopcols else None
        stream = io.StringIO(newline='\n')
        wb = xlrd.open_workbook(filepath, encoding_override=encoding)
        ws = wb.sheet_by_index(sheet_index)
        if not ws.nrows:
            raise IOError('This workbook does not contain any rows of data.')
        rows = ws.get_rows()
        if skiprows:
            for i in range(skiprows):
                next(rows)
        for i in rows:
            stream.write(','.join(j.value for j in i[skipcols:chopcols]) + '\n')
        _ = stream.seek(0)
        df = pd.read_csv(stream)
        if date_formats:
            for col, fmt in date_formats.items():
                df[col] = pd.to_datetime(df[col], format=fmt, errors=errors)
                if fill_date_errors:
                    df[col].fillna(dt(1900,1,1), inplace=True)
        stream.close()
        return df
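
A hedged usage sketch for read_xls above; the file name, column name, and date format are hypothetical, and 'Reader' stands in for whatever class defines the method:

# 'Reader' is a hypothetical container class for the method shown above.
df = Reader.read_xls('legacy_export.xls',
                     sheet_index=0,
                     skiprows=2,
                     date_formats={'invoice_date': '%d/%m/%Y'},
                     fill_date_errors=True)
print(df.head())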
Example #33
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime as dt

from bokeh.models import ColumnDataSource, Toggle
from pytz import timezone

executor_pc = ThreadPoolExecutor(max_workers=2)

channel_flag = [False, False, False, False]
PC_flag = [False]
res = [[], [], [], []]
res_PC = [None]
single_plot = [[], [], [], []]
multi = [[], [], [], []]
tab = [[], [], [], [], []]
number_points_select_value = [100]
channel_select_value = ["Channel 1", "Channel 1"]
time_range_select_value = ["1000"]
trigger_level_input_value = [1]
PT = timezone("US/Pacific")
curr_day = [dt.now(PT).day]
reference_date = [dt(2017, 1, 1, 0, 0, tzinfo=PT)]
reference_offset = [
    (reference_date[0].utcoffset() - dt.now(PT).utcoffset()).total_seconds()
]
channel_names = ["", "", "", ""]
source_PC = ColumnDataSource(data=dict(x=[], y=[]))
home_dir = "/home/lattice/ITdata/"

channel_button = []
source = []
source_load = []
for i in range(4):
    channel_button.append(
        Toggle(label="Channel" + str(i + 1) + " Off",
               button_type="success",
               active=True))
Example #34
from datetime import timedelta as dt

import numpy as np
import Oger, mdp
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
import pandas as pd

import sg.models.esn as esn
import sg.utils
from sg.data.sintef.create_full_temp_data import data as read_temperatures
import sg.data.sintef.userloads as ul
import sg.data.bchydro as bc
import sg.models.load_prediction as load_prediction

options = load_prediction.get_options()
dataset = load_prediction.BCHydroDataset(options, dt(hours=672))

# [len_data, res_size, leak, input, bias, spectral, 
#  seed, ridge, tmp_sm, load_sm]
genome = [672, 194, 0.9507914597451542, 0.23017393420143673, 0.18145624723908402, 1.1091372652108626, 53380, 1.4880952380952382e-07]

l = sg.utils.Enum('hindsight', 'size', 'leak', 'in_scale', 
                     'bias_scale', 'spectral', 'seed', 'ridge')#,
#'t_smooth', 'l_smooth', 't_zscore', 'l_zscore')


# A bit of work is needed to normalize an array that contains NaNs.
prediction_steps = 24
train_iter = dataset.train_data_iterator()
test_iter = dataset.test_data_iterator()
Example #35
from datetime import datetime as dt

hosts_path = 'hosts'
# hosts_path = '/etc/hosts' for linux users
# hosts_path = r'C:\Windows\System32\drivers\etc\hosts' for windows users
redirect = '127.0.0.1'
website_list = ['www.facebook.com', 'facebook.com']

while True:
    if dt(dt.now().year, dt.now().month, dt.now().day, 9) < dt.now() < dt(dt.now().year, dt.now().month, dt.now().day, 17):
        with open(hosts_path, 'r+') as file:
            content = file.read()
            for website in website_list:
                if website in content:
                    pass
                else:
                    file.write(redirect + ' ' + website + '\n')

    else:
        with open(hosts_path, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            for line in content:
                if not any(website in line for website in website_list):
                    file.write(line)
            file.truncate()
Example #36
from trainer.ForecastCONVAeroThunder import ForecastCONVAeroThunder

#
# MongoDB connection parameters
#
mongo_db = 'srcdata'
mongo_collection = 'meteoisd'
mongo_host = 'localhost'
mongo_port = 27017

#
# start of the period
# for the data used for training
#
dt_cur = dt(2005, 1, 1)
# dt_cur  = dt(2017, 1, 1)
#
# start of the period for the data used for validation
#
dt_pred = dt(2016, 7, 21)

#
# number of epochs
#
num_epochs = 50
# batch size
#
batch_size = 32

# input parameters
Example #37
#DAG object
from airflow import DAG
# BashOperator
from airflow.operators.bash_operator import BashOperator
# days ago function
from airflow.utils.dates import days_ago
from datetime import datetime as dt
from datetime import timedelta
import os

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': dt(2020, 12, 5),
    'retries': 1,
    'retry_delay': timedelta(minutes=1)
}

dag = DAG('Start_hadoop',
          description='Check if Hadoop running',
          default_args=default_args,
          schedule_interval=timedelta(days=1))

t1 = BashOperator(task_id='Start_hadoop', bash_command='hadoop_start', dag=dag)
Example #38
import os
import base64
import numpy as np
import pandas as pd
from datetime import datetime as dt

from plotly import optional_imports, utils
from plotly.graph_objs import Scatter, Scatter3d, Figure, Data
from PIL import Image

matplotlylib = optional_imports.get_module("plotly.matplotlylib")

if matplotlylib:
    import matplotlib.pyplot as plt
    from plotly.matplotlylib import Exporter, PlotlyRenderer

## JSON encoding
numeric_list = [1, 2, 3]
np_list = np.array([1, 2, 3, np.NaN, np.NAN, np.Inf, dt(2014, 1, 5)])
mixed_list = [
    1,
    "A",
    dt(2014, 1, 5),
    dt(2014, 1, 5, 1, 1, 1),
    dt(2014, 1, 5, 1, 1, 1, 1),
]
dt_list = [dt(2014, 1, 5), dt(2014, 1, 5, 1, 1, 1), dt(2014, 1, 5, 1, 1, 1, 1)]

df = pd.DataFrame(columns=["col 1"],
                  data=[1, 2, 3,
                        dt(2014, 1, 5), pd.NaT, np.NaN, np.Inf])

rng = pd.date_range("1/1/2011", periods=2, freq="H")
ts = pd.Series([1.5, 2.5], index=rng)
Example #39
import time
from datetime import datetime as dt

host_path = "/etc/hosts"
redirect = "127.0.0.1"
website_list = ["www.netflix.com", "www.facebook.com"]

while True:
    ymd = (dt.now().year, dt.now().month, dt.now().day)
    if dt(*ymd, 8) < dt.now() < dt(*ymd, 16):
        print("Rihanna")
        file = open(host_path, "r+")
        content = file.read()
        for website in website_list:
            if website in content:
                pass
            else:
                file.write(redirect + " " + website + "\n")
    else:
        print("Drake")
        file = open(host_path, 'r+')
        content = file.readlines()
        file.seek(0)
        for line in content:
            if not any(website in line for website in website_list):
                file.write(line)
            file.truncate()
    time.sleep(5)
Example #40
import time
from datetime import datetime as dt

hosts_temp = "hosts"
host_path = "C:\\Windows\\System32\\drivers\\etc\\hosts"
redirect = "127.0.0.1"
website_list = ["www.facebook.com", "facebook.com"]

while True:
    if dt(dt.now().year,
          dt.now().month,
          dt.now().day, 8) < dt.now() < dt(dt.now().year,
                                           dt.now().month,
                                           dt.now().day, 16):
        print("Working hours ...")
        with open(hosts_temp, 'r+') as file:
            content = file.read()
            for website in website_list:
                if website in content:
                    pass
                else:
                    file.write(redirect + " " + website + "\n")
    else:
        print("Fun hours ....")
        with open(hosts_temp, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            for line in content:
                if not any(website in line for website in website_list):
                    file.write(line)
            file.truncate()
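
The working-hours check repeated across these hosts-file scripts can be factored into a small helper; a minimal sketch using only the standard library (the helper name is not from the original examples):

from datetime import datetime as dt

def within_hours(start_hour, end_hour):
    # True when the current local time falls between the two whole hours today.
    now = dt.now()
    return dt(now.year, now.month, now.day, start_hour) < now < dt(now.year, now.month, now.day, end_hour)

if within_hours(8, 16):
    print("Working hours ...")
else:
    print("Fun hours ....")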
Example #41
def work(modelDir, ind):
    ''' Model processing done here. '''
    epochs = int(ind['epochs'])
    o = {}  # See bottom of file for out's structure

    try:
        with open(pJoin(modelDir, 'hist.csv'), 'w') as f:
            f.write(ind['histCurve'].replace('\r', ''))
        df = pd.read_csv(pJoin(modelDir, 'hist.csv'))
        assert df.shape[0] >= 26280, 'At least 3 years of data is required'

        if 'dates' not in df.columns:
            df['dates'] = df.apply(lambda x: dt(int(x['year']), int(x[
                'month']), int(x['day']), int(x['hour'])),
                                   axis=1)
    except:
        raise Exception("Load CSV file is incorrect format.")

    try:
        weather = [float(i) for i in ind['tempCurve'].split('\n') if i != '']
        assert len(weather) == 72, "weather csv in wrong format"
    except:
        raise Exception(ind['tempCurve'])

    # ---------------------- MAKE PREDICTIONS ------------------------------- #

    df = df.sort_values('dates')
    df = autofill(df)
    d = dict(df.groupby(df.dates.dt.date)['dates'].count())
    df = df[df['dates'].dt.date.apply(lambda x: d[x] == 24)]  # find all non-24

    df, tomorrow = lf.add_day(df, weather[:24])
    all_X = lf.makeUsefulDf(df)
    all_y = df['load']

    if ind['newModel'] == 'False':
        for day in ['one_day_model', 'two_day_model', 'three_day_model']:
            with open(pJoin(modelDir, ind[day + '_filename']), 'wb') as f:
                f.write(base64.standard_b64decode(ind[day]))

    #load prediction
    tomorrow_load, model, tomorrow_accuracy = lf.neural_net_next_day(
        all_X,
        all_y,
        epochs=epochs,
        save_file=pJoin(modelDir, 'one_day_model.h5'),
        model=(None
               if ind['newModel'] == 'True' else tf.keras.models.load_model(
                   pJoin(modelDir, ind['one_day_model_filename']))))

    o['tomorrow_load'] = tomorrow_load
    o['month_start'] = dt(tomorrow.year, tomorrow.month,
                          1).strftime("%A, %B %-d, %Y")
    o['forecast_start'] = tomorrow.strftime("%A, %B %-d, %Y")

    # second day
    df, second_day = lf.add_day(df, weather[24:48])
    if second_day.month == tomorrow.month:
        all_X = lf.makeUsefulDf(df, hours_prior=48, noise=5)
        all_y = df['load']
        two_day_predicted_load, two_day_model, two_day_load_accuracy = lf.neural_net_next_day(
            all_X,
            all_y,
            epochs=epochs,
            hours_prior=48,
            save_file=pJoin(modelDir, 'two_day_model.h5'),
            model=(None if ind['newModel'] == 'True' else
                   tf.keras.models.load_model(
                       pJoin(modelDir, ind['two_day_model_filename']))))
        two_day_peak = max(two_day_predicted_load)

        # third day
        df, third_day = lf.add_day(df, weather[48:72])
        if third_day.month == tomorrow.month:
            all_X = lf.makeUsefulDf(df, hours_prior=72, noise=15)
            all_y = df['load']
            three_day_predicted_load, three_day_model, three_day_load_accuracy = lf.neural_net_next_day(
                all_X,
                all_y,
                epochs=epochs,
                hours_prior=72,
                save_file=pJoin(modelDir, 'three_day_model.h5'),
                model=(None if ind['newModel'] == 'True' else
                       tf.keras.models.load_model(
                           pJoin(modelDir, ind['three_day_model_filename']))))
            three_day_peak = max(three_day_predicted_load)
        else:
            three_day_peak = 0
            three_day_load_accuracy = {'test': np.nan, 'train': np.nan}

    else:
        two_day_peak = 0
        two_day_load_accuracy = {'test': np.nan, 'train': np.nan}
        three_day_peak = 0
        three_day_load_accuracy = {'test': np.nan, 'train': np.nan}

    tomorrow_peak = max(tomorrow_load)
    m = df[(df['month'] == tomorrow.month) & (df['year'] != tomorrow.year)]
    hourly = m
    m = m.groupby(m.dates.dt.date)['load'].max()
    o['quantile'] = round(
        m[m < tomorrow_peak].shape[0] / float(m.shape[0]) * 100, 2)
    o['predicted_peak'] = [
        m.median(),
        highest_peak_this_month(df, tomorrow), tomorrow_peak, two_day_peak,
        three_day_peak
    ]
    o['predicted_peak_limits'] = [
        [m.min(), m.max()], [0, 0],
        [
            tomorrow_peak * (1 + tomorrow_accuracy['test'] * .01),
            tomorrow_peak * (1 - tomorrow_accuracy['test'] * .01)
        ],
        [
            two_day_peak * (1 + two_day_load_accuracy['test'] * .01),
            two_day_peak * (1 - two_day_load_accuracy['test'] * .01)
        ],
        [
            three_day_peak * (1 + three_day_load_accuracy['test'] * .01),
            three_day_peak * (1 - three_day_load_accuracy['test'] * .01)
        ]
    ]
    m = hourly
    previous_months = [{
        'year': y,
        'load': m[m['year'] == y]['load'].tolist()
    } for y in m.year.unique()]

    # ---------------------- FORMAT FOR DISPLAY ------------------------------- #
    l = []
    for d in previous_months:
        l.append({
            'name': d['year'].item(),
            'color': 'lightgrey',
            'data': d['load'],
            'type': 'line',
            'opacity': .05,
            'enableMouseTracking': False
        })

    load_leading_up = df[(df['month'] == tomorrow.month)
                         & (df['year'] == tomorrow.year)]['load'].tolist()
    l.append({
        'name': tomorrow.year,
        'color': 'black',
        'data': load_leading_up[:-72],
        'type': 'line'
    })
    l.append({
        'name': 'forecast',
        'color': 'blue',
        'data': [None] * (len(load_leading_up) - 72) + o['tomorrow_load'],
        'type': 'line',
        'zIndex': 5
    })

    # add uncertainty

    l.append({
        'name':
        'uncertainty',
        'color':
        '#D57560',
        'opacity':
        0.05,
        'data': [None] * (len(load_leading_up) - 72) + [
            x * round(tomorrow_accuracy['test'], 2) * .01 * 2
            for x in o['tomorrow_load']
        ],
    })

    l.append({
        'id':
        'transparent',
        'color':
        'rgba(255,255,255,0)',
        'data': [None] * (len(load_leading_up) - 72) + [
            x * (1 - round(tomorrow_accuracy['test'], 2) * .01)
            for x in o['tomorrow_load']
        ]
    })

    o['previous_months'] = l

    o['load_test_accuracy'] = round(tomorrow_accuracy['test'], 2)
    o['load_train_accuracy'] = round(tomorrow_accuracy['train'], 2)
    o['tomorrow_test_accuracy'] = round(tomorrow_accuracy['test'], 2)
    o['tomorrow_train_accuracy'] = round(tomorrow_accuracy['train'], 2)
    o['two_day_peak_train_accuracy'] = round(two_day_load_accuracy['train'], 2)
    o['two_day_peak_test_accuracy'] = round(two_day_load_accuracy['test'], 2)
    o['three_day_peak_train_accuracy'] = round(
        three_day_load_accuracy['train'], 2)
    o['three_day_peak_test_accuracy'] = round(three_day_load_accuracy['test'],
                                              2)

    o['peak_percent_chance'] = peak_likelihood(
        hist=highest_peak_this_month(df[:-48], tomorrow),
        tomorrow=tomorrow_peak,
        tomorrow_std=tomorrow_peak * tomorrow_accuracy['test'] * .01,
        two_day=two_day_peak,
        two_day_std=two_day_peak * two_day_load_accuracy['test'] * .01,
        three_day=three_day_peak,
        three_day_std=three_day_peak * three_day_load_accuracy['test'] * .01)

    o['stderr'] = ''

    with open(pJoin(modelDir, 'one_day_model.h5'), 'rb') as f:
        one_day_model = base64.standard_b64encode(f.read()).decode()
    with open(pJoin(modelDir, 'two_day_model.h5'), 'rb') as f:
        two_day_model = base64.standard_b64encode(f.read()).decode()
    with open(pJoin(modelDir, 'three_day_model.h5'), 'rb') as f:
        three_day_model = base64.standard_b64encode(f.read()).decode()

    # re-input values (i.e. modify the mutable dictionary that is used in heavyprocessing!!!!!!)
    ind['newModel'] = 'False',
    ind['one_day_model'] = one_day_model,
    ind['one_day_model_filename'] = 'one_day_model.h5',
    ind['two_day_model'] = two_day_model,
    ind['two_day_model_filename'] = 'two_day_model.h5',
    ind['three_day_model'] = three_day_model,
    ind['three_day_model_filename'] = 'three_day_model.h5',

    return o
Example #42
# Importing the required libraries
import time
from datetime import datetime as dt

# Specifying the file path for host file
hosts_path = r'C:\Windows\System32\Drivers\etc\hosts'
# Local IP address
redirect = '127.0.0.1'
# List of websites to block
website_list = ['www.facebook.com','facebook.com']

# Infinite loop
while True:
    # Conditional, if time is in working hours
    if dt(dt.now().year,dt.now().month,dt.now().day,12) < dt.now() < dt(dt.now().year,dt.now().month,dt.now().day,20):
        print('Working Hours...')
        # Open the host file
        with open(hosts_path,'r+') as file:
            # Read contents of host file
            content = file.read()
            for website in website_list:
                if website in content:
                    pass
                else:
                    # Write in host file the website to block
                    file.write(redirect + " " + website + "\n")
    else:
        with open(hosts_path,'r+') as file:
            # Read as a list
            content = file.readlines()
            # Place the cursor at the start of file
Example #43
from darksky import forecast
from datetime import datetime as dt
from secrets import API_KEY

lat = -23.496111
lon = -46.619722
key = API_KEY

SP = key, lat, lon
t = dt(2018, 1, 1, 12).isoformat()
sp_forecast = forecast(*SP, time=t)
Example #44
import time
from datetime import datetime as dt

hosts_temp = "/Users/mikerogove/Documents/GitHub/PythonMegaCourse/Section12/Hosts/hosts"
hosts_path = "/etc/hosts"
redirect = "127.0.0.1"
website_list = [
    "www.facebook.com", "facebook.com", "www.reddit.com", "reddit.com",
    "www.twitch.tv", "twitch.tv", "www.instagram.com", "instgram.com",
    "www.linkedin.com", "linkedin.com"
]

while True:
    if dt(dt.now().year,
          dt.now().month,
          dt.now().day, 17) > dt.now() < dt(dt.now().year,
                                            dt.now().month,
                                            dt.now().day, 21):
        print("Non-browsing hours...")
        with open(hosts_path, 'r+') as file:
            content = file.read()
            for website in website_list:
                if website in content:
                    pass
                else:
                    file.write(redirect + " " + website + "\n")
    else:
        with open(hosts_path, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            for line in content:
Example #45
# entry should be kept on an individual line. The IP address should
# be placed in the first column followed by the corresponding host name.
# The IP address and the host name should be separated by at least one
# space.
#This script modifies the hosts file: during working hours it maps the listed websites to
#localhost so they are blocked, and outside working hours it removes those entries again,
#as per the user's requirements
localhost = "127.0.0.1"
hosts_path = r"C:\Windows\System32\drivers\etc\hosts"  #host_file location
block_list = [
    "www.facebook.com", "www.instagram.com"
]  #Website list to Block, Just add to it the site you want to block
while True:

    if dt(dt.now().year,
          dt.now().month,
          dt.now().day, 9) < dt.now() < dt(
              dt.now().year,
              dt.now().month,
              dt.now().day,
              17):  #dt(year,month,day,hour) Change the hour as per your choice
        #if current time is between my working Hours, just block the list of sites contained in block_list
        with open(hosts_path, 'r+') as file:
            content = file.read()
            for websites in block_list:
                if websites not in content:
                    file.write(localhost + " " + websites + "\n")
    else:
        #Free or fun hours

        with open(hosts_path, 'r+') as file:
Example #46
0
def get_year_string(self):
    now = dt.utcnow()
    year_end = dt(now.year + 1, 1, 1)
    year_start = dt(now.year, 1, 1)
    year_percent = (now - year_start) / (year_end - year_start) * 100
    return f'For your information, the year is {year_percent:.1f}% over!'
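The percentage is plain timedelta division; a quick standalone check with a fixed date shows the arithmetic:

from datetime import datetime as dt

# 182 full days of a 365-day year have elapsed by 2021-07-02 00:00,
# so this prints '49.9% over'.
now = dt(2021, 7, 2)
year_start = dt(now.year, 1, 1)
year_end = dt(now.year + 1, 1, 1)
print(f'{(now - year_start) / (year_end - year_start) * 100:.1f}% over')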
Example #47
0
from datetime import timedelta as dt
import matplotlib.pyplot as plt
import iris.plot as iplt
from irise import convert, plot
from irise.diagnostics.fronts import fronts
from myscripts.models.um import case_studies


def main(cubes, p_level):
    # Load data
    theta = convert.calc('air_potential_temperature',
                         cubes,
                         levels=('air_pressure', [p_level]))[0]

    # Calculate the fronts
    loc = fronts.main(theta)
    loc = theta.copy(data=loc)

    # Plot the output
    plot.pcolormesh(theta, vmin=280, vmax=320, cmap='plasma')
    plt.title(r'$\theta$ at ' + str(p_level) + ' Pa')
    iplt.contour(loc, [0], colors='k')
    plt.show()


if __name__ == '__main__':
    forecast = case_studies.iop8()
    cubes = forecast.set_lead_time(dt(hours=36))
    p_level = 65000
    main(cubes, p_level)
Example #48
0
redirect = "127.0.0.1"

website_list = [
    'www.xataka.com', 'store.playstation.com', 'www3.animeflv.net',
    'mercadolibre.com.co', 'ebiblioteca.org', 'www.amazon.com',
    'www.lectulandia2.org', 'cuevana3.io', 'www.netflix.com', 'www.olx.com.co',
    'www.disneyplus.com', 'www.bbc.com'
]

from_hour = 6
to_hour = 16

while True:
    if dt(dt.now().year,
          dt.now().month,
          dt.now().day, from_hour) < dt.now() < dt(dt.now().year,
                                                   dt.now().month,
                                                   dt.now().day, to_hour):
        print("Working...")

        with open(hosts_temp, 'r+') as file:
            content = file.read()
            for website in website_list:
                if website in content:
                    pass
                else:
                    file.write(redirect + " " + website + "\n")

    else:
        print("Fun...")
        with open(hosts_temp, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            for line in content:
                if not any(website in line for website in website_list):
                    file.write(line)
            file.truncate()
Example #49
0
def update_graph(selected_dropdown_value):
    df = web.DataReader(selected_dropdown_value,
                        data_source=df2,  # pandas-datareader normally expects a provider name string here
                        start=dt(2000, 1, 1),  # a full (year, month, day) is required; the start year is assumed
                        end=dt.now())
    return {'data': [{'x': df.index, 'y': df.Close}]}
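pandas-datareader's data_source parameter names a provider, so passing a DataFrame (df2 above) will not work as written; a minimal corrected sketch, where the 'stooq' provider, the ticker, and the date range are illustrative assumptions:

from datetime import datetime as dt
import pandas_datareader.data as web

# Hedged sketch: any provider string supported by pandas-datareader works.
df = web.DataReader('AAPL', data_source='stooq',
                    start=dt(2015, 1, 1), end=dt.now())
print(df['Close'].head())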
Example #50
0
## Search
layout_search = html.Div([
    html.Div([
        html.Label('Location'),
        dcc.Input(id='loc_id', value='Location', type='text')],
        style={'marginLeft': 10, 'marginTop': 10, 'width': '49%', 'display': 'inline-block'}
    ),

    
    html.Div([
        html.Div([
            html.Label('Start date - end date')
            ]),
        dcc.DatePickerRange(
            id='my-date-picker-range',
            min_date_allowed=dt(2019, 8, 5),
            max_date_allowed=dt(2019, 9, 19),
            initial_visible_month=dt(2019, 8, 5),
            
            #end_date=dt(2019, 8, 25)
        )],
        style={'marginLeft': 10, 'marginTop': 10, 'width': '100%'}
    ),
    
    html.Div([
        html.Label('Number of beds'),
        dcc.Dropdown(
            options=[
                {'label': '1 Bed', 'value': '1'},
                {'label': '2 Beds', 'value': '2'},
                {'label': '3 Beds', 'value': '3'},
Example #51
0
def plot_job_length_hist():
    db, om = get_isc_db()
    
    # jobs_df = get_jobs_df(db)
    start_dt = dt(2017, 11, 1)
    end_dt = dt(2018, 12, 10)

    jobs_df = get_job_in_time_range(start_dt, end_dt, db)
    jobs_df = jobs_df[jobs_df['status'] == "Completed"]
    col_conv_to_int = [
        'RLnodect',
        'RUcput',
        'resname'
        ]
    def hash_for_cluster(x):
        return hash(x) % 1000000000

    jobs_df = jobs_df.applymap(lambda x: -2 if x is np.nan else -1 if x is None else x)

    def hash_everything_else(x):
        try:
            return int(x)
        except:
            return hash_for_cluster(x)

    for c in col_conv_to_int:
        jobs_df[c] = jobs_df[c].map(hash_everything_else)
        jobs_df[c] = jobs_df[c].astype(np.int64)

    start_stop = jobs_df[['epilogue_start',
                          'epilogue_end',
                          'prologue_start',
                          'prologue_end',
                          'moab_start',
                          'moab_end',
                          'start', 'end']]
    
    jobs_df['length'] = start_stop['end'] - start_stop['start']
    jobs_df['moab_length'] = start_stop['moab_end'] - start_stop['moab_start']
    jobs_df['prologue_length'] = start_stop['prologue_end'] - start_stop['prologue_start']
    jobs_df['epilogue_length'] = start_stop['epilogue_end'] - start_stop['epilogue_start']


    categorical_columns = [c for c in jobs_df.columns if not isinstance(jobs_df[c].values[0], numbers.Number)]
    numeric_columns = list(set(jobs_df.columns).difference(set(categorical_columns)))

    jobs_cat_wip_df = jobs_df[categorical_columns].applymap(hash_for_cluster)

    jobs_cat_wip_df.columns = ["hash_{}".format(c) for c in jobs_cat_wip_df.columns]

    jobs_wip_df = pd.concat([jobs_df[numeric_columns], jobs_cat_wip_df], axis=1)

    for c in jobs_wip_df:
        # Replace remaining nulls with a sentinel value
        jobs_wip_df[c] = jobs_wip_df[c].fillna(-3)


    if "PCA" in task:
        n_c = 20
        score_list = {}
        for n_c in range(3, 30):
            print('n_c {}'.format(n_c))
            pca = PCA(n_components=28)
            s = pca.fit_transform(jobs_wip_df)
            #cluster = SpectralClustering(n_clusters=n_c, random_state=0).fit(s)
            #cluster = DBSCAN()
            cluster = KMeans(n_clusters=n_c, random_state=1, n_jobs=2).fit(s)
            pred = cluster.predict(s)
            sample_mask = np.random.choice(range(len(pred)), size=10000, replace=False)
            _s = silhouette_samples(s[sample_mask], pred[sample_mask])

            s_score = [ _s[pred[sample_mask] == i] for i in range(n_c)]
            _ = [i.sort() for i in s_score]
            score_list[n_c] = s_score
            
            pca = PCA(n_components=3)
            s = pca.fit_transform(jobs_wip_df)
        plt_index = 1
        plt.suptitle('Sample silhouette scores for various class sizes')
        for k, s_score in score_list.items():
            if k in [3, 4, 5, 7, 8, 9, 15, 26, 27]:
                pass
            else:
                continue
            plt.subplot(3,3,plt_index)
            for j, i in enumerate(s_score):
                r = list(zip(*enumerate(i)))
                plt.plot(r[1], r[0], label=j)
            plt.title('Num Classes {}'.format(k))
            if plt_index in [7,8,9]:
                plt.xlabel('s score')
            if plt_index in [1, 4, 7]:
                plt.ylabel('sample index')
            plt_index += 1
        plt.legend(loc='center left', bbox_to_anchor=(1, 2))
            # plt.figure()
        #for j, i in enumerate(s_score):
        #    plt.plot(*zip(*enumerate(i)), label=j)
        #plt.legend()

        plt.figure()
        plt_index = 1
        for k, i in list(score_list.items()):
            means = [l.mean() for l in i]
            if (k-2)%3 == 0:
                plt.legend()
                plt.subplot(3,3,plt_index)
                plt_index += 1
            plt.hist(means, bins=20, alpha=.3, label=k)  # bin count assumed; no `bins` variable is defined above

        plt.figure()
        means_tot = [np.mean([l.mean() for l in i]) for k, i in list(score_list.items())]
        median_tot = [np.median([np.median(l) for l in i]) for k, i in list(score_list.items())]
    plt.title('Scores aggregated for each class size')
        plt.plot(range(3,30), means_tot, label="Mean")
        plt.plot(range(3,30), median_tot, label="Median")
        plt.xlabel("Class size")
        plt.ylabel("S Score")
        plt.legend()

        for n_c in [8, 15]:
            print('n_c {}'.format(n_c))
            pca = PCA(n_components=28)
            s = pca.fit_transform(jobs_wip_df)
            #cluster = SpectralClustering(n_clusters=n_c, random_state=0).fit(s)
            #cluster = DBSCAN()
            cluster = KMeans(n_clusters=n_c, random_state=1, n_jobs=2).fit(s)
            pred = cluster.predict(s)
            sample_mask = np.random.choice(range(len(pred)), size=10000, replace=False)
            pca = PCA(n_components=3)
            s = pca.fit_transform(jobs_wip_df)

            fig = plt.figure()

            ax = Axes3D(fig)

            for i in range(n_c):
                ax.scatter(*list(zip(*(s[sample_mask][pred[sample_mask] == i]))), '.', label=i)

    if "kmean" in task:
        score_list = []
        rms_list = []
        for n_clusters in range(100, 120):
            tick = time.time()
            print('kmeans for n_clusters {}'.format(n_clusters))
            kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(jobs_wip_df.values)

            pred = kmeans.predict(jobs_wip_df.values)
            sample_mask = np.random.choice(range(len(pred)), size=10000, replace=False)

            _s = silhouette_samples(jobs_wip_df.values[sample_mask], pred[sample_mask])

            score_list.append(_s)

            labeled = list(sorted(zip(pred, jobs_wip_df.values), key=lambda x : x[0]))
            grouped_classes = itertools.groupby(labeled, key=lambda x : x[0] )
            _rSq = 0
            for k_label, k_list in grouped_classes:
                k_mean_displace = kmeans.cluster_centers_[k_label] - np.mean(list(zip(*k_list))[1], axis=0)
                _rSq += sum(k_mean_displace*k_mean_displace)
            rms_list.append(np.sqrt(_rSq))
            print('_rSq {} time {:.4f} min'.format(rms_list[-1], (time.time() - tick)/60))
        
    fig, ax = plt.subplots()
    
    ax.set_yscale('log')

    jobs_df['length'].hist(bins=range(0, 200000, 120))  # 'length' lives on jobs_df, not start_stop
Example #52
0
TIER_TWO = MarriageBotPerks(max_children=15,
                            can_run_bloodtree=True,
                            can_run_disownall=True,
                            can_run_abandon=True,
                            tree_command_cooldown=15,
                            tree_render_quality=2)
TIER_ONE = MarriageBotPerks(max_children=10,
                            can_run_disownall=True,
                            tree_command_cooldown=15,
                            tree_render_quality=1)
TIER_VOTER = MarriageBotPerks(tree_command_cooldown=30)
TIER_NONE = MarriageBotPerks()

CACHED_PERK_ITEMS = collections.defaultdict(lambda: (
    None,
    dt(2000, 1, 1),
))


def cache_response(**lifetime):
    def inner(func):
        async def wrapper(bot: vbu.Bot, user_id: int):
            perks, expiry_time = CACHED_PERK_ITEMS[user_id]
            if expiry_time > dt.utcnow():
                return perks  # Cache not expired
            perks = await func(bot, user_id)
            CACHED_PERK_ITEMS[user_id] = (
                perks,
                dt.utcnow() + timedelta(**lifetime),
            )
            return perks
        return wrapper
    return inner
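The decorator factory takes timedelta keyword arguments, so any of timedelta's keywords set the cache lifetime; a hedged usage sketch (fetch_user_perks is a hypothetical coroutine matching the wrapper's (bot, user_id) signature):

# Hypothetical usage of cache_response; results are cached per user_id
# for the given lifetime before the wrapped coroutine runs again.
@cache_response(minutes=30)
async def fetch_user_perks(bot, user_id: int):
    ...  # expensive lookup here
    return TIER_NONE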
Example #53
0
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title = "CoSim"

# these are the controls where the parameters can be tuned.
# They are not placed on the screen here, we just define them.
controls = dbc.Card(
    [
        dbc.FormGroup([
            dbc.Label('Date of first infection'),
            html.Br(),
            dcc.DatePickerSingle(day_size=39,
                                 display_format="DD.MM.YYYY",
                                 date='2020-01-01',
                                 id='initial_date',
                                 min_date_allowed=dt(2019, 10, 1),
                                 max_date_allowed=dt(2020, 5, 31),
                                 initial_visible_month=dt(2020, 1, 15),
                                 placeholder="test"),
        ]),
        dbc.FormGroup([
            dbc.Label("Initial Cases"),
            dbc.Input(
                id="initial_cases",
                type="number",
                placeholder="initial_cases",
                min=1,
                max=1_000_000,
                step=1,
                value=10,
            )
        ]),
    ],
)
Example #54
0
def create_layout(app):
    # Page layouts
    return html.Div([
        #page1
        dcc.Interval(id='interval-cer', interval=5000, n_intervals=0),
        html.H2('Certification'),
        html.Div([
            html.Label('Product Name'),
            dcc.Dropdown(
                id='p2_cer_product',
                options=[{
                    'label': i,
                    'value': j
                } for i, j in zip(projects, projects_id)],
            )
        ],
                 style=drop_style),
        html.Div([
            html.Label('Test Date'),
            dcc.DatePickerSingle(
                id='p2_cer_date',
                min_date_allowed=dt(2000, 1, 1),
                max_date_allowed=dt(2099, 12, 31),
                initial_visible_month=dt.today(),
            )
        ]),
        html.H4('Certification Status'),
        html.Div(
            [
                html.Div(generate_dropdown_table(df_status)),
            ],
            style={
                'width': '100%',
                'height': '100%',
                'float': 'left',
                'display': 'inline-block'
            }),
        html.H4('Link'),
        html.Div(
            [
                html.Div(generate_table_link(df_link)),
            ],
            style={
                'width': '100%',
                'height': '100%',
                'float': 'left',
                'display': 'inline-block'
            }),
        html.H4('Certification Date(YYYY-MM-DD)'),
        html.Div(
            [
                html.Div(generate_table(df_date)),
            ],
            style={
                'width': '100%',
                'height': '100%',
                'float': 'left',
                'display': 'inline-block'
            }),
        html.Div([
            dcc.ConfirmDialogProvider(
                children=html.Button('Submit', ),
                id='p2_provider_cer',
                message='Do you want to update the information?'),
        ]),
        #row5
        html.Div(
            [html.Div(id='p2_output-provider_cer')],
            style={
                'width': '100%',
                'height': '100%',
                'float': 'left',
                'display': 'inline-block'
            }),
        html.Br(),
    ])
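A layout factory like this is typically assigned to the app once at startup; a minimal hedged sketch, assuming app is the dash.Dash instance used elsewhere in the project (run_server is app.run in newer Dash releases):

# Hypothetical wiring for the layout factory above.
app.layout = create_layout(app)

if __name__ == '__main__':
    app.run_server(debug=True)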
Example #55
0
arg_perigee = 270.5910  # argument of perigee (degrees)
mean_anomaly = 347.7789  # mean anomaly (degrees)
mean_motion = 1.00258630  # mean motion (revolutions/day)
decay_rate = 0.00000228  # orbital decay rate (revolutions/day^2)
orbit_num = 2543  # cumulative number of revolutions so far

# Convert epoch_date to the format used by XEphem
year = int(epoch_date[:2])
if (year >= 57):
    year += 1900
else:
    year += 2000
dayno = float(epoch_date[2:])
epoch_date_sl = "1/%s/%s" % (dayno, year)

# Build 3600 timestamps at one-minute intervals starting from epoch_date
decimal_time = dt(year, 1, 1) + td(dayno - 1)
time_list = [(decimal_time + td(hours=i / 60)).strftime("%Y/%m/%d %H:%M:%S")
             for i in range(0, 3600)]

orbit_data = ",".join(
    map(str,
        (object_name, "E", epoch_date_sl, inclination, RAofAN, eccentricity,
         arg_perigee, mean_anomaly, mean_motion, decay_rate, orbit_num)))
satellite = ephem.readdb(orbit_data)

satellite.compute(time_list[0])
# satellite.compute(epoch_date_sl)
print('%.10f' % satellite.sublat)
print('%.10f' % satellite.sublong)
Example #56
0
    def setUp(self):
        """Create a cube containing a regular lat-lon grid and other necessary
        ingredients for unit tests."""

        data = np.arange(0, 800, 1)
        data.resize(2, 20, 20)
        latitudes = np.linspace(-90, 90, 20)
        longitudes = np.linspace(-180, 180, 20)
        latitude = DimCoord(latitudes,
                            standard_name='latitude',
                            units='degrees',
                            var_name='latitude')
        longitude = DimCoord(longitudes,
                             standard_name='longitude',
                             units='degrees',
                             var_name='longitude')

        # Use time of 2017-02-17 06:00:00, 07:00:00
        time = DimCoord([1487311200, 1487314800],
                        standard_name='time',
                        units=cf_units.Unit(
                            'seconds since 1970-01-01 00:00:00',
                            calendar='gregorian'),
                        var_name='time')

        time_dt = dt(2017, 2, 17, 6, 0)
        time_extract = Constraint(time=PartialDateTime(
            time_dt.year, time_dt.month, time_dt.day, time_dt.hour))

        cube = Cube(data,
                    long_name="air_temperature",
                    dim_coords_and_dims=[(time, 0), (latitude, 1),
                                         (longitude, 2)],
                    units="K")
        cube2 = cube.copy()

        orography = Cube(np.ones((20, 20)),
                         long_name="surface_altitude",
                         dim_coords_and_dims=[(latitude, 0), (longitude, 1)],
                         units="m")

        land = orography.copy()
        land.rename('land_binary_mask')
        land.data = land.data + 1

        ancillary_data = {}
        ancillary_data.update({'orography': orography})
        ancillary_data.update({'land_mask': land})

        # Copies of cube simply renamed to be read as additional data.
        temperature_on_height_levels = cube.copy()
        temperature_on_height_levels.rename('temperature_on_height_levels')
        pressure_on_height_levels = cube.copy()
        pressure_on_height_levels.rename('pressure_on_height_levels')
        surface_pressure = cube.copy()
        surface_pressure.rename('surface_pressure')

        # Build reference copy of additional_data dictionary.
        additional_data = {
            'temperature_on_height_levels':
            CubeList([temperature_on_height_levels]),
            'pressure_on_height_levels':
            CubeList([pressure_on_height_levels]),
            'surface_pressure':
            CubeList([surface_pressure])
        }

        self.data_directory = mkdtemp()

        self.cube_file = os.path.join(self.data_directory,
                                      '01-temperature_at_screen_level.nc')
        self.cube_file2 = os.path.join(self.data_directory,
                                       '02-temperature_at_screen_level.nc')
        orography_file = os.path.join(self.data_directory, 'orography.nc')
        land_file = os.path.join(self.data_directory, 'land_mask.nc')
        ad_file_temperature = os.path.join(self.data_directory,
                                           'temperature_on_height_levels.nc')
        ad_file_pressure = os.path.join(self.data_directory,
                                        'pressure_on_height_levels.nc')
        ad_file_s_pressure = os.path.join(self.data_directory,
                                          'surface_pressure.nc')

        save_netcdf(cube, self.cube_file)
        save_netcdf(cube2, self.cube_file2)
        save_netcdf(orography, orography_file)
        save_netcdf(land, land_file)
        save_netcdf(temperature_on_height_levels, ad_file_temperature)
        save_netcdf(pressure_on_height_levels, ad_file_pressure)
        save_netcdf(surface_pressure, ad_file_s_pressure)

        diagnostic_recipe = {
            "temperature": {
                "diagnostic_name": "air_temperature",
                "extrema": True,
                "filepath": "temperature_at_screen_level",
                "neighbour_finding": {
                    "land_constraint": False,
                    "method": "fast_nearest_neighbour",
                    "vertical_bias": None
                }
            }
        }

        self.config_file = os.path.join(self.data_directory,
                                        'spotdata_diagnostics.json')
        with open(self.config_file, 'w') as ff:
            json.dump(diagnostic_recipe,
                      ff,
                      sort_keys=True,
                      indent=4,
                      separators=(',', ': '))

        self.made_files = [
            self.cube_file, self.cube_file2, orography_file, land_file,
            ad_file_temperature, ad_file_pressure, ad_file_s_pressure,
            self.config_file
        ]

        self.cube = cube
        self.cube2 = cube2
        self.temperature_on_height_levels = temperature_on_height_levels
        self.ancillary_data = ancillary_data
        self.additional_data = additional_data
        self.time_extract = time_extract
Example #57
0
    xTrain, xTest, yTrain, yTest = train_test_split(
        X, y, test_size=1 / (end.year - start.year + 1), shuffle=False)

    return df, xTrain, xTest, yTrain, yTest


if __name__ == '__main__':
    #===========================================================================
    # build to handle lower and uppercase tickers
    #===========================================================================

    data = 'fangs'
    against = 'fangs'
    minShift = 1
    maxShift = 100
    start = dt(2015, 1, 1)
    end = dt(2018, 12, 31)

    clf = MLPRegressor(hidden_layer_sizes=[100, 100, 100], max_iter=200)

    ticker = 'googl'
    sigVal = 0.7

    df, xTrain, xTest, yTrain, yTest = getData(start, end, ticker, sigVal,
                                               data, against, minShift,
                                               maxShift)

    clf.fit(xTrain, yTrain)
    trainScore = clf.score(xTrain, yTrain)
    """
    if trainScore > 0.6:
Example #58
0
def utc2gps(utcTime):
    #+==============================================================================+
    # utc2gps : Convert UTC time to GPS time expressed in GPS week and GPS
    # second of week.
    #
    #
    # INPUTS
    #
    # utcTime --------- The UTC time and date expressed as a Python datetime.
    #
    #
    # OUTPUTS
    #
    # gpsWeek --------- The unambiguous GPS week number where zero corresponds to
    # 					midnight on the evening of 5 January/morning of 6 January,
    # 					1980. By unambiguous is meant the full week count since
    # 					the 1980 reference time (no rollover at 1024 weeks).
    #
    # gpsSec ---------- The GPS time of week expressed as GPS seconds from midnight
    # 					on Saturday.
    #
    #+------------------------------------------------------------------------------+
    # References:
    #
    # 1: 'Global positioning system directorate systems engineering & integration
    #		interface specification IS-GPS-200', IS-GPS-200H-003, Dec 9, 2015
    #
    #
    # Author: Caleb North
    #+==============================================================================+

    from datetime import datetime as dt
    from datetime import timedelta
    import sys
    from gpsLeaps import gpsLeaps

    ## Verify that the time is valid
    gpsStart = dt(1980, 1, 6)
    if utcTime < gpsStart:
        sys.exit('ERROR: The time in question occurs before GPS time begins.')

    ## Find number of Leaps
    leaps = 0
    LeapTable = gpsLeaps()
    for leap in LeapTable:
        if utcTime > leap:
            leaps += 1

    ## Find GPS time
    gpsTime = utcTime + timedelta(seconds=leaps)
    gpsWeek = (gpsTime - gpsStart).days // 7
    gpsSec = (gpsTime - gpsStart - timedelta(days=gpsWeek * 7)).total_seconds()

    ## Warn if the LeapTable may be outdated
    if utcTime > LeapTable[-1]:
        print('')
        print('Warning: The last known leap second occurred:', LeapTable[-1])
        print('It would be wise to verify there have not been additional leaps.')
        print('')

    return gpsWeek, gpsSec
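A hedged usage sketch of the converter, assuming the local gpsLeaps module is importable; the chosen date is illustrative:

from datetime import datetime as dt

# Hypothetical call: any UTC datetime after 1980-01-06 is accepted.
week, sec = utc2gps(dt(2020, 6, 15, 12, 0, 0))
print('GPS week:', week, 'seconds of week:', sec)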
Example #59
0
import time
from datetime import datetime as dt

hostsfile_path = "/home/vatsan/github/py-tries/hosts-pyex"
redirect_ip = "127.0.0.1"
website_list = [
    "www.facebook.com", "www.facebook.com", "www.dub119.mail.live.com"
]

while True:
    if (dt(dt.now().year,
           dt.now().month,
           dt.now().day, 8) < dt.now() < dt(dt.now().year,
                                            dt.now().month,
                                            dt.now().day, 18)):
        with open(hostsfile_path, 'r+') as file:
            content = file.read()
            for website in website_list:
                if website in content:
                    pass
                else:
                    file.write(redirect_ip + "   " + website + "\n")
        print("Working Hours")
    else:
        with open(hostsfile_path, 'r+') as file:
            content = file.readlines()
            file.seek(0)
            for line in content:
                if not any(website in line for website in website_list):
                    file.write(line)
            file.truncate()
Example #60
0
from darksky.api import DarkSky
from darksky.types import languages, units, weather
from datetime import datetime as dt
from datetime import timedelta
import csv
import json
import jsonpickle

API_KEY = 'f40beeca4915d42a90c66737b1465346'

# Synchronous way
darksky = DarkSky(API_KEY)
# Bognor Regis, West Sussex: coordinates of the tomato-growing area
latitude = 50.8333
longitude = -0.6332
t = dt(2015, 1, 2, 12)
t_limit = dt(2019, 12, 28, 12)
# Open the file and update it in append mode
with open('temperatures.csv', 'a', newline='') as csvfile:
    fieldnames = ['day', 'temperature_max', 'temperature_min', 'humidity', \
    'precip_intensity', 'precip_intensity_max', 'pressure', 'visibility', 'wind_speed']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    while (t < t_limit):
        dias = timedelta(days=20)
        t = t + dias
        # Synchronous way Time Machine
        forecast = darksky.get_time_machine_forecast(
            latitude,
            longitude,
            extend=False,  # default `False`