Example #1
 def runTest(self):
     cal=WorkCalendar([8,16])
     date1=date(2011,12,16) # a FRI close to year end and holidays
     self.assertEqual(cal.networkdays(date1,date1),1,'networkdays')
     self.assertEqual(cal.networkdays(date1,date(2011,12,17)),1,'networkdays')
     self.assertEqual(cal.networkdays(date1,date(2011,12,18)),1,'networkdays')
     self.assertEqual(cal.networkdays(date1,date(2011,12,19)),2,'networkdays')
     date2=date(2012,1,9) # a MON close to next year begin
     self.assertEqual(cal.networkdays(date1,date2),17,'networkdays')
     cal.addholidays(date(1,12,25)) # Christmas was a SUN in 2011
     self.assertEqual(cal.networkdays(date1,date2),17,'networkdays')
     cal.addholidays(date(1,1,1)) # New Year was also a SUN in 2012...
     self.assertEqual(cal.networkdays(date1,date2),17,'networkdays')
     cal.addholidays(date(2012,1,2)) # ...so the 2nd was a holiday
     self.assertEqual(cal.networkdays(date1,date2),16,'networkdays')
     cal.addholidays(days(date(2011,12,19),21)) # in fact, the company was closed for 3 weeks
     self.assertEqual(cal.networkdays(date1,date2),2,'networkdays')
     
             
     print cal.cast(datetime(2011,12,16,19))
     print cal.cast(datetime(2012,1,9,3))
     print cal.cast(datetime(2012,1,9,17))
     
     self.assertEqual(cal.diff(date1,date2),timedelta(hours=8),'diff')
     self.assertEqual(cal.diff(date2,date1),timedelta(hours=-8),'diff')
     self.assertEqual(cal.gethours(date1,date2),8.,'gethours')
     
     self.assertEqual(cal.workday(date1,1),date2, 'workday')
     self.assertEqual(cal.workday(date2,-1),date1, 'workday %s != %s'%(cal.workday(date2,-1),date1))
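The expected counts above follow Excel-style NETWORKDAYS semantics: both endpoints inclusive, weekends skipped, holidays subtracted. As a cross-check (my own sketch, not part of the original test suite; it assumes numpy is available), numpy.busday_count gives the same numbers once its exclusive end date is pushed forward by one day:

# Hedged cross-check of the inclusive day counts above using numpy.busday_count
# (exclusive end date); not part of the original WorkCalendar test.
import numpy as np
from datetime import date, timedelta

def networkdays_np(d1, d2, holidays=()):
    # busday_count excludes the end date, so extend it by one day for inclusive counting
    return int(np.busday_count(d1, d2 + timedelta(days=1), holidays=list(holidays)))

assert networkdays_np(date(2011, 12, 16), date(2011, 12, 16)) == 1   # a single FRI
assert networkdays_np(date(2011, 12, 16), date(2011, 12, 18)) == 1   # SAT and SUN don't count
assert networkdays_np(date(2011, 12, 16), date(2011, 12, 19)) == 2   # FRI + MON
assert networkdays_np(date(2011, 12, 16), date(2012, 1, 9)) == 17    # before any holidays are added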
Example #2
def main():

	#put data in right form
	X0,Y0,X1,Y1 = arrange_data()
	
	#get predictions
	win_predict,prob_predict = predict_winners_and_probs(X0,Y0,X1)
	spread_predict,homescore_predict,awayscore_predict,totalscore_predict = regress_spread_and_scores(X0,Y0,X1)
	
	#reshape data
	win_predict = win_predict.reshape((len(win_predict),1))
	prob_predict = prob_predict[:,1]
	prob_predict = prob_predict.reshape((len(win_predict),1))
	spread_predict = spread_predict.reshape((len(spread_predict),1))
	homescore_predict = homescore_predict.reshape((len(homescore_predict),1))
	awayscore_predict = awayscore_predict.reshape((len(homescore_predict),1))
	totalscore_predict = totalscore_predict.reshape((len(homescore_predict),1))

	df = DataFrame.from_csv(open('final.csv'))

	#build new dataframe, with columns for predicted values
	df_selected = df[datetime(2013,1,1):datetime(2013,4,18)]
	df_selected['win_predict'] = win_predict
	df_selected['prob_predict'] = prob_predict
	df_selected['spread_predict'] = spread_predict
	df_selected['homescore_predict'] = homescore_predict
	df_selected['awayscore_predict'] = awayscore_predict
	df_selected['totalscore_predict'] = totalscore_predict
	
	df_selected.to_csv(open('predictions.csv','wb'))
Example #3
def bytes_to_donnees(file_path):
    file_data = open(file_path,'rb')
    liste_donnees = ms.load(file_data)  # 'ms' is whatever serialization module the original imports (not shown)
    print(len(liste_donnees))
    dates = []
    print(liste_donnees[0])
    for date in liste_donnees[0]:
        print(date)
        jours = int(date[0:2])
        mois = int(date[3:5])
        annee = int(date[6:10])
        heures = int(date[11:13])
        minutes = int(date[14:16])
        secondes = int(date[17:19])
        dates.append(datetime(annee,mois,jours,heures,minutes,secondes))
    effacement = []
    for effacements in liste_donnees[3]:
        date_str = effacements[0]  # renamed: the original rebinding of 'datetime' shadowed the datetime class
        jours = int(date_str[0:2])
        mois = int(date_str[3:5])
        annee = int(date_str[6:10])
        heures = int(date_str[11:13])
        minutes = int(date_str[14:16])
        secondes = int(date_str[17:19])
        effacement.append((datetime(annee,mois,jours,heures,minutes,secondes),effacements[1]))
    return([dates,liste_donnees[1],effacement,liste_donnees[3],liste_donnees[4],liste_donnees[5]])
Example #4
    def test_sum_of_distance(self):
        self.data = []
        self.data.append(mySport(starttime=datetime(year=2015, month=11, day=24, hour=22, minute=0, second=0), \
                                 endtime=datetime(year=2015, month=11, day=24, hour=22, minute=10, second=0), \
                                 distance=20))
        self.result = achievement_judger_in_sport_everyday(self.data)
        assert (3 not in self.result), 'incorrect result in sum of distance step 1'
        self.data.append(mySport(starttime=datetime(year=2015, month=11, day=24, hour=22, minute=13, second=0), \
                                 endtime=datetime(year=2015, month=11, day=24, hour=22, minute=20, second=0), \
                                 distance=4990))
        self.result = achievement_judger_in_sport_everyday(self.data)
        assert (3 in self.result), 'incorrect result in sum of distance step 2'

        self.data = []
        self.data.append(mySport(starttime=datetime(year=2015, month=11, day=24, hour=22, minute=0, second=0), \
                                 endtime=datetime(year=2015, month=11, day=24, hour=22, minute=10, second=0), \
                                 distance=20000))
        self.result = achievement_judger_in_sport_everyday(self.data)
        assert (13 not in self.result), 'incorrect result in sum of distance step 3'
        self.data.append(mySport(starttime=datetime(year=2015, month=11, day=24, hour=22, minute=17, second=0), \
                                 endtime=datetime(year=2015, month=11, day=24, hour=22, minute=20, second=0), \
                                 distance=10000))
        self.result = achievement_judger_in_sport_everyday(self.data)
        assert (13 not in self.result), 'incorrect result in sum of distance step 4'
        self.data.append(mySport(starttime=datetime(year=2015, month=11, day=24, hour=23, minute=17, second=0), \
                                 endtime=datetime(year=2015, month=11, day=24, hour=23, minute=20, second=0), \
                                 distance=40000))
        self.result = achievement_judger_in_sport_everyday(self.data)
        assert (13 in self.result), 'incorrect result in sum of distance step 5'
        print('test_sum_of_distance DONE!')
Example #5
    def test_will_filter_for_january(self):
        start_of_january = workhours.build_from_date(datetime(2014, 1, 1))
        hours = [start_of_january]

        result = hours_filtering.filter_by__current_worksheet_month(datetime(2014, 1, 1), hours)

        assert start_of_january in result
Example #6
File: views.py  Project: areski/a2b-cust
def call_detail(request):
    if request.method == 'POST' or request.method == 'GET' and "card_id" in request.session:
        kwargs = {}
        if request.method == 'POST':             
             phone_no        = request.POST['phone_no'].strip()
             phone_no_type   = request.POST['phone_no_type']
             if "fromday_chk" in request.POST:
                 fromday_chk = 'on'
             else:
                 fromday_chk = 'off'
             if "today_chk" in request.POST:
                 today_chk = 'on'
             else:
                 today_chk = 'off'

             call_type       = request.POST['call_type']
             from_day        = int(request.POST['from_day'])
             from_month_year = request.POST['from_month_year']
             from_year       = int(request.POST['from_month_year'][0:4])
             from_month      = int(request.POST['from_month_year'][5:7])
             to_day          = int(request.POST['to_day'])
             to_month_year   = request.POST['to_month_year']
             to_year         = int(request.POST['to_month_year'][0:4])
             to_month        = int(request.POST['to_month_year'][5:7])
             start_date      = datetime(from_year,from_month,from_day)
             end_date        = datetime(to_year,to_month,to_day)
             show            = request.POST['show']
             result          = request.POST['result']
             currency        = request.POST['currency']
             

             if fromday_chk == 'on' and today_chk == 'on':
                 kwargs[ 'starttime__range' ] = (start_date, end_date)
             if fromday_chk == 'on' and today_chk != 'on' :
                 kwargs[ 'starttime__gte' ] = start_date
             if today_chk == 'on' and fromday_chk != 'on':
                 kwargs[ 'starttime__lte' ] = end_date
                 
             if phone_no != '':
                 if phone_no_type == '1':
                     kwargs[ 'calledstation__exact' ] = phone_no
                 if phone_no_type == '2':
                     kwargs[ 'calledstation__startswith' ] = phone_no
                 if phone_no_type == '3':
                     kwargs[ 'calledstation__contains' ] = phone_no
                 if phone_no_type == '4':
                     kwargs[ 'calledstation__endswith' ] = phone_no
             
             if call_type != '' and call_type != '-1':
                 calltype_list = call_type_list()
                 for i in calltype_list:
                     if int(i[0]) == int(call_type) :
                         kwargs[ 'sipiax' ] = call_type
             
             if show == 'ANSWER':
                 kwargs[ 'terminatecauseid__exact' ] = '1'
             if show == 'ALL':
                 list = dial_status_list()
                 kwargs[ 'terminatecauseid__in' ] = (l[0]  for l in list)
             form = 
Example #7
def parse(root, UNITS):
    value = root.find("./pod[@id='Result']").find('subpod').find('plaintext').text

    print value
    if value.startswith('~~ '):
        value = value.strip('~~ ')
    m = __number_re.search(value)

    if m:
        QUANTITY = float(m.group(1))
        UNIT = m.group(2).lower()

        if "trillion" in UNIT:
            QUANTITY *= pow(10, 12)
        elif "billion" in UNIT:
            QUANTITY *= pow(10, 9)
        elif "million" in UNIT:
            QUANTITY *= pow(10, 6)
        elif "thousand" in UNIT:
            QUANTITY *= pow(10, 3)

        elif "date" in UNITS:

            try:
                # interpret the numeric value as a date and convert it to epoch seconds
                dt = dateparse(str(int(QUANTITY)))
                QUANTITY = (dt - datetime(1970, 1, 1)).total_seconds()

            except Exception as e:

                raise NameError("Exception")

        if not UNITS:
            if "$" in value:
                UNIT = "dollars"
        else:
            UNIT = UNITS

    else:
        # check if it is a date
        try:
            print value
            if len(value) == 4:
                epoch = datetime(1970, 1, 1)
                t = datetime(int(value), 1, 1)
                diff = t-epoch
                QUANTITY = diff.total_seconds()
                print QUANTITY
            else:
                print "Not 4 chars"
                print value
                dt = dateparse(value)
                QUANTITY = (dt - datetime(1970, 1, 1)).total_seconds()
            UNIT = "date"

        except:
            raise NameError('Could not parse!')

    print QUANTITY
    return (QUANTITY, UNIT)
Example #8
def test_DateFormatter():
    import pylab
    from datetime import datetime
    import matplotlib.testing.jpl_units as units
    units.register()

    # Lets make sure that DateFormatter will allow us to have tick marks
    # at intervals of fractional seconds.

    t0 = datetime( 2001, 1, 1, 0, 0, 0 )
    tf = datetime( 2001, 1, 1, 0, 0, 1 )

    fig = pylab.figure()
    ax = pylab.subplot( 111 )
    ax.set_autoscale_on( True )
    ax.plot( [t0, tf], [0.0, 1.0], marker='o' )

    # rrule = mpldates.rrulewrapper( dateutil.rrule.YEARLY, interval=500 )
    # locator = mpldates.RRuleLocator( rrule )
    # ax.xaxis.set_major_locator( locator )
    # ax.xaxis.set_major_formatter( mpldates.AutoDateFormatter(locator) )

    ax.autoscale_view()
    fig.autofmt_xdate()

    fig.savefig( 'DateFormatter_fractionalSeconds' )
Example #9
def test_RRuleLocator():
    import pylab
    import matplotlib.dates as mpldates
    import matplotlib.testing.jpl_units as units
    from datetime import datetime
    import dateutil
    units.register()

    # This will cause the RRuleLocator to go out of bounds when it tries
    # to add padding to the limits, so we make sure it caps at the correct
    # boundary values.
    t0 = datetime( 1000, 1, 1 )
    tf = datetime( 6000, 1, 1 )

    fig = pylab.figure()
    ax = pylab.subplot( 111 )
    ax.set_autoscale_on( True )
    ax.plot( [t0, tf], [0.0, 1.0], marker='o' )

    rrule = mpldates.rrulewrapper( dateutil.rrule.YEARLY, interval=500 )
    locator = mpldates.RRuleLocator( rrule )
    ax.xaxis.set_major_locator( locator )
    ax.xaxis.set_major_formatter( mpldates.AutoDateFormatter(locator) )

    ax.autoscale_view()
    fig.autofmt_xdate()

    fig.savefig( 'RRuleLocator_bounds' )
Example #10
from datetime import datetime
from math import floor

def getDateIndex(day, month, year):
    dayOfYear = (datetime(year,month,day) - datetime(year, 3, 21)).days
    if dayOfYear < 0:
        dayOfYear += 365
    degree = ((360*dayOfYear)/365)
    zodiac = floor(degree/30)
    return zodiac
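Assuming the intent is the tropical zodiac, which begins with Aries at the March equinox, a hedged usage sketch for getDateIndex above (the sign list and the sample calls are my own illustration, not part of the snippet):

# Hypothetical lookup for the index returned by getDateIndex above.
SIGNS = ["Aries", "Taurus", "Gemini", "Cancer", "Leo", "Virgo",
         "Libra", "Scorpio", "Sagittarius", "Capricorn", "Aquarius", "Pisces"]

print(SIGNS[int(getDateIndex(21, 3, 2020))])   # index 0 -> Aries (start of the cycle)
print(SIGNS[int(getDateIndex(25, 12, 2020))])  # index 9 -> Capricorn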
Example #11
def convertDateTime(datestring):
    """
    Takes in a string representing the date and time in
    Hubbard Brook format and return a datetime object
    """
    if datestring[0] == "A": #MODIS format
        year = int(datestring[1:5])
        day = int(datestring[5:])
        return datetime(year, 1, 1, 0, 0, 0) + timedelta(day-1)

    if datestring[0] == "(": #datetime object format
        d = eval(datestring)
        return datetime(d[0], d[1], d[2], d[3], d[4], d[5])

    try: #MATLAB format
        datestring=float(datestring)
        return datetime.fromordinal(int(datestring)) + timedelta(days=datestring%1) - timedelta(days = 366)

    except: #Hubbard Brook Format
        year = int(datestring[:4])
        month = int(datestring[5:7])
        date = int(datestring[8:10])
        hour = int(datestring[11:13])
        minute = int(datestring[14:16])
        seconds = int(datestring[17:19])
        return datetime(year, month, date, hour, minute, seconds)
Example #12
    def test_vectorized_udf_timestamps_respect_session_timezone(self):
        schema = StructType([
            StructField("idx", LongType(), True),
            StructField("timestamp", TimestampType(), True)])
        data = [(1, datetime(1969, 1, 1, 1, 1, 1)),
                (2, datetime(2012, 2, 2, 2, 2, 2)),
                (3, None),
                (4, datetime(2100, 3, 3, 3, 3, 3))]
        df = self.spark.createDataFrame(data, schema=schema)

        f_timestamp_copy = pandas_udf(lambda ts: ts, TimestampType())
        internal_value = pandas_udf(
            lambda ts: ts.apply(lambda ts: ts.value if ts is not pd.NaT else None), LongType())

        timezone = "America/New_York"
        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": False,
                "spark.sql.session.timeZone": timezone}):
            df_la = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
                .withColumn("internal_value", internal_value(col("timestamp")))
            result_la = df_la.select(col("idx"), col("internal_value")).collect()
            # Correct result_la by adjusting 3 hours difference between Los Angeles and New York
            diff = 3 * 60 * 60 * 1000 * 1000 * 1000
            result_la_corrected = \
                df_la.select(col("idx"), col("tscopy"), col("internal_value") + diff).collect()

        with self.sql_conf({
                "spark.sql.execution.pandas.respectSessionTimeZone": True,
                "spark.sql.session.timeZone": timezone}):
            df_ny = df.withColumn("tscopy", f_timestamp_copy(col("timestamp"))) \
                .withColumn("internal_value", internal_value(col("timestamp")))
            result_ny = df_ny.select(col("idx"), col("tscopy"), col("internal_value")).collect()

            self.assertNotEqual(result_ny, result_la)
            self.assertEqual(result_ny, result_la_corrected)
Example #13
def xlrd_to_date(cv):
    if not str(cv).isalpha() and len(str(cv)) > 1:
        from1900to1970 = datetime(1970,1,1) - datetime(1900,1,1) + timedelta(days=2)
        print cv
        value = date.fromtimestamp( int(cv) * 86400) - from1900to1970
        print value
        return value
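xlrd_to_date above goes through a Unix timestamp to turn an Excel 1900-system serial number into a date. A more direct sketch of the same conversion (my own illustration, not the snippet's code): Excel's day 1 is 1900-01-01 and it wrongly counts a phantom 1900-02-29, so for modern serials the anchor works out to 1899-12-30.

from datetime import datetime, timedelta

def excel_serial_to_datetime(serial):
    # Excel 1900 date system: serial 1 == 1900-01-01, plus a phantom 1900-02-29,
    # so serials above 60 are plain day counts from 1899-12-30 (fractions are time of day).
    return datetime(1899, 12, 30) + timedelta(days=float(serial))

print(excel_serial_to_datetime(40000))    # 2009-07-06 00:00:00
print(excel_serial_to_datetime(40000.5))  # 2009-07-06 12:00:00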
Example #14
 def get_daily_rate(self, ids, date_from, context={}):
     obj = self.browse(ids)[0]
     d_from = datetime.strptime(date_from, "%Y-%m-%d")
     year = d_from.year
     d0 = datetime(year, 1, 1)
     d1 = datetime(year, 12, 31)
     year_days = (d1 - d0).days + 1
     if obj.dep_method == "line":
         year_rate = (obj.price_purchase - (obj.salvage_value or 0)) * obj.dep_rate / 100
         day_rate = year_rate / year_days
     elif obj.dep_method == "decline":
         d = datetime.strptime(obj.date_purchase, "%Y-%m-%d")
         amt = obj.price_purchase - (obj.salvage_value or 0)
         year_rate = amt * obj.dep_rate / 100
         n = 0
         while d < d_from:
             d += relativedelta(months=1)
             n += 1
             if n % 12 == 0:
                 amt -= year_rate
                 year_rate = amt * obj.dep_rate / 100
         day_rate = year_rate / year_days
     else:
         raise Exception("Invalid depreciation method for fixed asset %s" % obj.number)
     return day_rate
Example #15
def getDateIndex(day, month, year):
    daysSinceSolstice = (datetime(year,month,day) - datetime(year, 3, 21)).days
    if daysSinceSolstice < 0:
        daysSinceSolstice += 365
    celestialLongitude = daysSinceSolstice*(360/365)
    zodiac = floor(celestialLongitude/30)
    return zodiac
Example #16
 def testSaveFormData_6(self):
     """ Test nested repeated form definition created and data saved """
     create_xsd_and_populate("6_nestedrepeats.xsd", "6_nestedrepeats.xml", self.domain)
     cursor = connection.cursor()
     cursor.execute("SELECT * FROM schema_basicdomain_xml_nestedrepeats")
     row = cursor.fetchone()
     self.assertEquals(row[9],"foo")
     self.assertEquals(row[10],"bar")
     self.assertEquals(row[11],"yes")
     self.assertEquals(row[12],"no")
     cursor.execute("SELECT * FROM schema_basicdomain_xml_nestedrepeats_root_nested")
     row = cursor.fetchall()
     self.assertEquals(row[0][1],"userid0")
     self.assertEquals(row[0][2],"deviceid0")
     if settings.DATABASE_ENGINE=='mysql' :
         self.assertEquals(row[0][3],datetime(2009,10,9,11,4,30) )
         self.assertEquals(row[0][4],datetime(2009,10,9,11,9,30) )
     else:
         self.assertEquals(row[0][3],"2009-10-9 11:04:30" )
         self.assertEquals(row[0][4],"2009-10-9 11:09:30" )
     self.assertEquals(row[0][5],1)
     self.assertEquals(row[1][1],"userid2")
     self.assertEquals(row[1][2],"deviceid2")
     if settings.DATABASE_ENGINE=='mysql' :
         self.assertEquals(row[1][3],datetime(2009,11,12,11,11,11) )
         self.assertEquals(row[1][4],datetime(2009,11,12,11,16,11) )
     else:
         self.assertEquals(row[1][3],"2009-11-12 11:11:11" )
         self.assertEquals(row[1][4],"2009-11-12 11:16:11" )
     self.assertEquals(row[1][5],1)
Example #17
def get_qtd_actions_near_date(cop, date):

    clusteringDefault = '%Y/%m/%d'
    inicioCopaConf = datetime(2013,6,10)
    terminoCopaConf = datetime(2013,7,3)
    #clusterPorHora = '%Y/%B/%d %H:%m:%S'
    sincronizacoes = Sincronizacao.get_all()
    tmp = [sinc for sinc in sincronizacoes if sinc.cop_responsavel['id']==cop]
    qtde = 0
    print len(tmp)
    for sinc in sincronizacoes:
        actionsDates = []
        if (sinc.cop_responsavel['id']== cop or cop == 'TODOS'):
         
            for action in sinc.acoes:
                print action.inicio, date - action.inicio
                if (
                   #((action.tipo == 'INTERVALO') and (action.inicio <= date) and (date <=action.fim)) or
                   #((action.tipo == 'PONTUAL')and (action.inicio <= date))
                     action.tipo == 'INTERVALO' and 
                     action.inicio >=inicioCopaConf and
                     action.fim <= terminoCopaConf and 
                     (date - action.inicio).days <= 1 and 
                     (action.fim - date).days <=1                 
                   #or
                   #((action.tipo == 'PONTUAL') and (date - action.inicio).days <= 1)
                ):
                    
                    qtde = qtde + len(actionsDates)

    return qtde
Example #18
def dia_actual_inasistencia(fecha_dia, u):
    if Asistencia.objects.filter(user=u):
        if Asistencia.objects.filter(user=u).order_by('-fecha')[0]:
            ult_asi_aux = Asistencia.objects.filter(user=u).order_by('-fecha')[0]
            if ult_asi_aux.turno_ini.year == fecha_dia.year and ult_asi_aux.turno_ini.month == fecha_dia.month and ult_asi_aux.turno_ini.day == fecha_dia.day:
                return 1
    sem = dia_semana(fecha_dia.day, fecha_dia.month, fecha_dia.year)
    if u.get_profile().horario.dias.filter(dia__codigo=sem):
        try:
            campo = u.get_profile().horario.dias.get(dia__codigo=sem)
            if campo:
                ini = campo.turno.hora_ini
                if ini.hour == 23:
                    if fecha_dia.hour > 7:
                        Asistencia.objects.create(user=u,
                                                  fecha=datetime(fecha_dia.year, fecha_dia.month, fecha_dia.day),
                                                  valor=False,
                                                  turno_ini=datetime(fecha_dia.year, fecha_dia.month, fecha_dia.day))
                if ini.hour == 7:
                    if (fecha_dia.hour > 17 or ( fecha_dia.hour == 17 and fecha_dia.minute > 30 )):
                        Asistencia.objects.create(user=u,
                                                  fecha=datetime(fecha_dia.year, fecha_dia.month, fecha_dia.day),
                                                  valor=False,
                                                  turno_ini=datetime(fecha_dia.year, fecha_dia.month, fecha_dia.day))
        except Exception as e:
            print e
Example #19
    def test_apply_schema(self):
        from datetime import date, datetime
        rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
                                    date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
                                    {"a": 1}, (2,), [1, 2, 3], None)])
        schema = StructType([
            StructField("byte1", ByteType(), False),
            StructField("byte2", ByteType(), False),
            StructField("short1", ShortType(), False),
            StructField("short2", ShortType(), False),
            StructField("int1", IntegerType(), False),
            StructField("float1", FloatType(), False),
            StructField("date1", DateType(), False),
            StructField("time1", TimestampType(), False),
            StructField("map1", MapType(StringType(), IntegerType(), False), False),
            StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
            StructField("list1", ArrayType(ByteType(), False), False),
            StructField("null1", DoubleType(), True)])
        df = self.spark.createDataFrame(rdd, schema)
        results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
                             x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
        r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
             datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
        self.assertEqual(r, results.first())

        with self.tempView("table2"):
            df.createOrReplaceTempView("table2")
            r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
                               "short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
                               "float1 + 1.5 as float1 FROM table2").first()

            self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
Example #20
 def testTiempoMenor14Min(self):
     # Test case: time less than 14 minutes
     tarifaPrueba = Tarifa(4, 6)
     reservaIni = datetime(2015, 6, 29, 12, 0, 0, 0)
     reservaFin = datetime(2015, 6, 29, 12, 14, 0, 0)
     tiempoReserva = [reservaIni, reservaFin]
     self.assertRaises(Exception, calcularPrecio,tarifaPrueba, tiempoReserva)
Example #21
 def testTiempoExcedido(self):
     # Test case: reservation time exceeded
     tarifaPrueba = Tarifa(4, 6)
     reservaIni = datetime(2015, 2, 20, 6, 0, 0, 0)
     reservaFin = datetime(2015, 2, 27, 6, 1, 0, 0)
     tiempoReserva = [reservaIni, reservaFin]
     self.assertRaises(Exception, calcularPrecio,tarifaPrueba, tiempoReserva)
Example #22
 def testTarifaNegativa(self):
     # Test case with a negative rate
     tarifaPrueba = Tarifa(-2, -3)
     reservaIni = datetime(2015, 4, 19, 12, 15, 0, 0)
     reservaFin = datetime(2015, 4, 19, 12, 45, 0, 0)
     tiempoReserva = [reservaIni, reservaFin]
     self.assertRaises(Exception, calcularPrecio,tarifaPrueba, tiempoReserva)
Example #23
    def testFloating(self):

        dt = datetime(1999, 1, 2, 13, 46, tzinfo=self.view.tzinfo.floating)
        self.failUnlessEqual(formatTime(self.view, dt), "1:46 PM")

        dt = datetime(2022, 9, 17, 2, 11, tzinfo=self.view.tzinfo.floating)
        self.failUnlessEqual(formatTime(self.view, dt), "2:11 AM")
Example #24
File: lianjia.py  Project: dzygcc/house
 def crawPriceTrends(self):
     for c in self.sources:
         city = c["city"]
         if c.has_key("trends"):
             print c["trends"]
             json = get_json(c["trends"])
             year = int(json["time"]["year"])
             month = json["time"]["month"]
             month = int(re.compile(u"(\\d+)月").findall(month)[0])
             last = datetime(year, month, 1)
             price_trends = json["currentLevel"]["dealPrice"]["total"]
             price_trends.reverse()
             for price in price_trends:
                 print last, price
                 price = price.encode("utf-8")
                 row = {"city": city, "district":"月趋势", "total": 0, "price": price, "date": last}
                 old = self.dao.get_item(row["city"], row["district"], row["date"])
                 if not old:
                     self.dao.insert_item(row)
                 else:
                     self.dao.update_item(city, "月趋势", 0, price, last)
                 month -= 1
                 if month == 0:
                     year -= 1
                     month = 12
                 last = datetime(year, month, 1)
Example #25
    def testNoTimeZone(self):

        dt = datetime(1999, 1, 2, 13, 46)
        self.failUnlessEqual(formatTime(self.view, dt), "1:46 PM")

        dt = datetime(2022, 9, 17, 2, 11)
        self.failUnlessEqual(formatTime(self.view, dt), "2:11 AM")
Example #26
	def NONBUSICAL(self):
		
		session = self.sess

		date = datetime(2010, 12, 14)
		new_busiday = calendar(CALID = 'DECEMBER', DATE=date)
		session.add(new_busiday)
		session.flush()

		new_sch = schedule(SCHEDID = '1 NONBUSIDAY DEC', INTERVAL = 1, METHOD = 0, AFTER=0, DELTA=3, WAIT=0, CALID='DECEMBER', CALTYPE=1)
		session.add(new_sch)
		session.flush()
		
		new_action = action(ACTIONID = 'TESTACTIONDEC', USERID = self.USERID)
		session.add(new_action)
		session.flush()
		
		date = datetime(2010, 12, 13, 1, 1, 1)
		new_job = job(ACTIONID = 'TESTACTIONDEC', SCHEDID = '1 NONBUSIDAY DEC', SCHDATE=date, STATUS = '2')
		session.add(new_job)
		session.flush()

		new_job.resch(None)
	
		self.assert_(new_job.SCHDATE.strftime("%Y") == '2010', "NONBUSICAL test Invalid Year")
		self.assert_(new_job.SCHDATE.strftime("%d") == '14', "NONBUSICAL test Invalid Day")
		self.assert_(new_job.SCHDATE.strftime("%m") == '12', "NONBUSICAL test Invalid Month")
		self.assert_(new_job.SCHDATE.strftime("%H") == '01', "NONBUSICAL test Invalid Hour")
		self.assert_(new_job.SCHDATE.strftime("%M") == '01', "NONBUSICAL test Invalid Minute")
		self.assert_(new_job.SCHDATE.strftime("%S") == '01', "NONBUSICAL test Invalid Second")
Example #27
def get_rates_by_openid(openid):
    data = []
    lowest = 10000
    highest = 0
    average = 0
    count = 0
    user = User.query.filter_by(openid = openid).first()
    if user == None:
        return [], 0, 0, 0
    today = date.today()
    rates = user.rates.filter(Rate.time >= datetime(today.year, today.month, today.day)).order_by(Rate.time).all()
    ctime = datetime(today.year, today.month, today.day)
    j = 0
    while ctime <= datetime.now():
        if j >= len(rates) or rates[j].time > ctime:
            data.append(0)
        else:
            data.append(rates[j].total)
            lowest = min(lowest, rates[j].total)
            highest = max(highest, rates[j].total)
            average += rates[j].total
            count += 1
            j += 1
        ctime += timedelta(minutes = 10)
    if count > 0 :
        average /= count
    return data, average, highest, lowest
Example #28
    def testDefaultTimeZone(self):

        dt = datetime(1999, 1, 2, 13, 46, tzinfo=self.view.tzinfo.default)
        self.failUnlessEqual(formatTime(self.view, dt), "1:46 PM")

        dt = datetime(2022, 9, 17, 2, 11, tzinfo = self.view.tzinfo.default)
        self.failUnlessEqual(formatTime(self.view, dt), "2:11 AM")
Example #29
def datetimef(d,t=None,fmt='%Y-%m-%d'):
    """"converts something to a datetime
    :param d: can be:
    
    - datetime : result is a copy of d with time optionaly replaced
    - date : result is date at time t, (00:00AM by default)
    - int or float : if fmt is None, d is considered as Excel date numeric format 
      (see http://answers.oreilly.com/topic/1694-how-excel-stores-date-and-time-values/ )
    - string or speciefied format: result is datetime parsed using specified format string
    
    :param fmt: format string. See http://docs.python.org/2/library/datetime.html#strftime-strptime-behavior
    :param t: optional time. replaces the time of the datetime obtained from d. Allows datetimef(date,time)
    :return: datetime
    """
    if isinstance(d,datetime):
        d=d
    elif isinstance(d,date):
        d=datetime(year=d.year, month=d.month, day=d.day)
    elif isinstance(d,(six.integer_types,float)): 
        d=datetime(year=1900,month=1,day=1)+timedelta(days=d-2) # -2 because Excel counts serial 1 as 1900-01-01 and also counts a phantom 1900-02-29
    else:
        d=datetime.strptime(str(d),fmt)
    if t:
        d=d.replace(hour=t.hour,minute=t.minute,second=t.second)
    return d
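A few usage examples for the datetimef helper above (a sketch based on its own branches; the Excel expectation assumes the 1900 date system described in the docstring):

from datetime import date, time, datetime

assert datetimef('2012-01-09') == datetime(2012, 1, 9)                               # default '%Y-%m-%d' parsing
assert datetimef(date(2011, 12, 16), t=time(8, 0)) == datetime(2011, 12, 16, 8, 0)   # date plus explicit time
assert datetimef(40000) == datetime(2009, 7, 6)                                      # Excel serial 40000 -> 2009-07-06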
Example #30
 def testFechaInvalida(self):
     # Test case with an invalid reservation time (start after end)
     tarifaPrueba = Tarifa(4, 6)
     reservaIni = datetime(2015, 4, 30, 12, 15, 0, 0)
     reservaFin = datetime(2015, 4, 19, 12, 45, 0, 0)
     tiempoReserva = [reservaIni, reservaFin]
     self.assertRaises(Exception, calcularPrecio,tarifaPrueba, tiempoReserva)
Example #31
from netCDF4 import Dataset
import netCDF4 
import xarray as xr
from namelist_geos_scripts import download_start, path_to_storm, download_end
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import shapely.geometry as sgeom
import matplotlib.patches as mpatches
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from datetime import datetime  # needed below for the start/end times
font = {'family' : 'sans-serif',
        'size'   : 14}
plt.rc('font', **font)
plt.rcParams['axes.linewidth'] = 1.5
#a script that tracks a storm's minimum sea level pressure in the raw NASA data
#first get the time right
starttime=datetime(2005, 8, 30, 17)
end=datetime(2005, 9, 15, 17)
start_lon=150
start_lat=30.5
now=starttime 
d1=now
#don't change this
d2=datetime(2005,5,15,21,30)
d=(d1-d2)
moment=int(d.total_seconds() / 1800)

#%%
#we need to make sure we are only looking over the ocean, so look at oceanfrac first
#this is constant so we don't need to look at multiple start times

oceanfile= 'https://opendap.nccs.nasa.gov/dods/OSSE/G5NR/Ganymed/7km/0.0625_deg/const/const_2d_asm_Nx'
Example #32
    stocks = sys.argv[1]
  else:
    stocks = ("AAPL")
  for stock in stocks.split(','):
    if 'quotes' in locals():
       del(quotes)
    quotes = GoogleIntradayQuote(stock,60,300)
    #atts = [a for a in dir(quotes) if not a.startswith('__')]    
    #print atts
    for i in range(len(quotes.high)):
      #from dateutil import parser    
      #datetime_str = str(quotes.date[i]) + ' ' + str(quotes.time[i]) + ' PST'
      #dt = parser.parse(datetime_str)
      from datetime import datetime
      datetime_str = str(quotes.date[i]) + '-' + str(quotes.time[i])

      dt = datetime.strptime(datetime_str,"%Y-%m-%d-%H:%M:%S")
      td = (dt - datetime(1970, 1, 1))
      epoch_time = (td.microseconds + (td.seconds + td.days * 86400) * 10**6) / 10**6  # same as td.total_seconds()

      
      #print 'open '   + str(epoch_time) + ' ' + str(quotes.date[i]) + ' '  + str(quotes.time[i]) + ' ' + str(quotes.open_[i]) + ' symbol=' + str(quotes.symbol)      
      print ('open '   + str(epoch_time) + ' ' + str(quotes.open_[i]) + ' symbol=' + str(quotes.symbol), file=f)      
      print ('close '  + str(epoch_time) + ' ' + str(quotes.close[i]) + ' symbol=' + str(quotes.symbol), file=f)    
      print ('high '  + str(epoch_time) + ' ' + str(quotes.high[i]) + ' symbol=' + str(quotes.symbol), file=f)           
      print ('low '  + str(epoch_time) + ' ' + str(quotes.low[i]) + ' symbol=' + str(quotes.symbol), file=f)          
      print ('volume '  + str(epoch_time) + ' ' + str(quotes.volume[i]) + ' symbol=' + str(quotes.symbol), file=f)

  print (quotes)    
  f.close()
Example #33
 def date_obj(self, date=datetime.now()):  # note: the default argument is evaluated once, at definition time
     items = []
     for obj in self.all():
         if obj.purchase_date:  # the original `datetime(obj.purchase_date)` is not a valid datetime() call
             items.append(obj)
     return items
Example #34
import pandas as pd

from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from datetime import datetime, timedelta

from clickhouse_driver import Client

name_surname = "name_surname"

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime(2019, 3, 14),
    'email': ['*****@*****.**'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    'provide_context': True,
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}

dag = DAG(
    name_surname + '_01',
    default_args=default_args,
Example #35
 def filter_document(self, document):
     fulltime = datetime.fromtimestamp(document['timestamp'])
     date = datetime.combine(fulltime.date(), time(0, 0))
     timestamp = int((date - datetime(1970, 1, 1)).total_seconds())
     document['timestamp'] = timestamp
Example #36
def get_datetime(x):
    return datetime(x[0], x[1], x[2], x[3], x[4], x[5])
Example #37
# Advice from the datetime maintainer Paul Ganssle on Talk Python to Me:
# https://talkpython.fm/episodes/show/271/unlock-the-mysteries-of-time-pythons-datetime-that-is
# https://blog.ganssle.io/
#
# recommends not using pytz (which will be dropped in 2 years).
# Instead, using the new zoneinfo coming in with python 3.9.
# (backport in pypi), or use dateutil.tz

# A datetime.tzinfo object is a set of rules that include when to switch
# between DST and ST; it's not just an offset.
from datetime import datetime
from dateutil import tz
MT = tz.gettz("America/Denver")

# Timezones have names, but getting them requires a date:
time_winter = datetime(2020, 2, 14, 12, tzinfo=MT)
MT.tzname(time_winter)
# -> 'MST'
time_summer = datetime(2020, 7, 14, 12, tzinfo=MT)
MT.tzname(time_summer)
# -> 'MDT'

# Don't use utcnow or utcfromtimestamp in modern python.
# They might even be deprecated soon. Instead:
from datetime import timezone
utcnow = datetime.now(tz=timezone.utc)
datetime.fromtimestamp(1571595618.0, tz=timezone.utc)

# All pytz code should be migrated over to zoneinfo or dateutil
# and get on the PEP495 bandwagon.
# PEP 495: Local time disambiguation: https://www.python.org/dev/peps/pep-0495/
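On Python 3.9+ the same checks can be written with the standard-library zoneinfo module recommended above; a minimal sketch (the only assumption is that zoneinfo, or its backport, is available with tz data installed):

from datetime import datetime, timezone
from zoneinfo import ZoneInfo   # Python 3.9+; earlier versions can use the backports.zoneinfo package

MT = ZoneInfo("America/Denver")

print(datetime(2020, 2, 14, 12, tzinfo=MT).tzname())  # -> 'MST'
print(datetime(2020, 7, 14, 12, tzinfo=MT).tzname())  # -> 'MDT'

# PEP 495: fold=1 selects the second occurrence of a wall time repeated at the DST -> ST switch
ambiguous = datetime(2020, 11, 1, 1, 30, tzinfo=MT)
print(ambiguous.utcoffset())                  # UTC-6 (still MDT)
print(ambiguous.replace(fold=1).utcoffset())  # UTC-7 (MST, after the clocks fall back)

# timezone-aware "now", instead of the discouraged utcnow()
print(datetime.now(tz=timezone.utc))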
Example #38
import plotly.graph_objs as go
from plotly.graph_objs import Scatter
import pandas as pd
import plotly.offline as offline
import sqlite3
import datetime
import django
django.setup()
from WeatherStation.models import Record
from pytz import timezone
from datetime import datetime
import plotly.figure_factory as ff
from django.db.models import Max, Min, StdDev, Avg

slct = 0
sdate = datetime(2016, 3, 2)
edate = datetime(2017, 2, 3)
sdate2 = datetime(2016, 4, 4)

column = str('battAvg')

#for median,
#count = dt.count()
#dt.values_list(term, flat=True).order_by(term)[int(round(count/2))]

if slct == 0:
    dt = Record.objects.filter(timeStamp__range=(sdate, edate))
    list = [['Max batt', 'Min batt', 'stdDev batt', 'batt avg'],
            [
                dt.aggregate(battmax=Max(column))['battmax'],
                dt.aggregate(battmax=Min('battAvg'))['battmax'],
Example #39
def main(filename_AttgFellow):

    G = nx.Graph()

    ###############################################
    # adding nodes for the Attendings and Fellows:#
    ###############################################

    result_att_fellows = csv.reader(open(filename_AttgFellow, 'rb'),
                                    delimiter=',')

    reader = csv.DictReader(open(filename_AttgFellow))
    csv_att_fellow_headers = reader.fieldnames

    cont_shifts = 0
    list_atts = []
    list_fellows = []
    list_shifts = []
    for row in result_att_fellows:

        if row != csv_att_fellow_headers:  # I exclude the first row (how to do it more efficiently?)

            if len(
                    row[0]
            ) > 0:  # just in case the field is empty (for the first week, it is)

                #labels for the nodes:
                shift1 = str(row[0]) + " T1"
                att1 = str(row[1])
                fellow1 = str(row[2])

                shift2 = str(row[0]) + " T2"
                att2 = str(row[3])
                fellow2 = str(row[4])

                weekend_shift = str(row[5])
                att_w1 = str(row[6])
                att_w2 = str(row[7])
                fellow_w = str(row[8])

                if "," in att2:  # in some fields we have: att1 mm/dd-dd, att2 mm/dd-dd

                    #team2:
                    parts = att2.split(" ")
                    att2a = parts[0]
                    att2b = parts[2]
                    date1 = parts[1]
                    date2 = parts[3]

                    part_times = date1.split("/")
                    mm = int(part_times[0])

                    part_times[1] = part_times[1].strip(",")
                    dd_start = int(part_times[1].split("-")[0])
                    dd_end = int(part_times[1].split("-")[1])

                    date_start_week_att2a = datetime(yy, mm, dd_start)
                    date_end_week_att2a = datetime(yy, mm, dd_end)

                    shift2a = str(mm) + "/" + str(dd_start) + "/" + str(
                        yy) + " T2"
                    G.add_node(shift2a)

                    G.node[shift2a]["type"] = "shift"
                    G.node[shift2a]["start"] = date_start_week
                    G.node[shift2a]["stop"] = date_end_week
                    G.node[shift2a]["order"] = cont_shifts
                    G.node[shift2a]["team"] = 2

                    list_shifts.append(shift2a)
                    cont_shifts += 1

                    if att2a not in list_atts:
                        list_atts.append(att2a)
                        G.add_node(att2a)
                        G.node[att2a]["type"] = "A"
                    G.add_edge(shift2a, att2a)
                    G.add_edge(shift2a, fellow2)

                    part_times = date2.split("/")
                    mm = int(part_times[0])

                    dd_start = int(part_times[1].split("-")[0])
                    dd_end = int(part_times[1].split("-")[1])

                    date_start_week_att2b = datetime(yy, mm, dd_start)
                    date_end_week_att2b = datetime(yy, mm, dd_end)

                    shift2b = str(mm) + "/" + str(dd_start) + "/" + str(
                        yy) + " T2"
                    G.add_node(shift2b)

                    G.node[shift2b]["type"] = "shift"
                    G.node[shift2b]["start"] = date_start_week
                    G.node[shift2b]["stop"] = date_end_week
                    G.node[shift2b]["order"] = cont_shifts
                    G.node[shift2b]["team"] = 2

                    list_shifts.append(shift2b)
                    cont_shifts += 1

                    if att2b not in list_atts:
                        list_atts.append(att2b)
                        G.add_node(att2b)
                        G.node[att2b]["type"] = "A"
                    G.add_edge(shift2b, att2b)
                    G.add_edge(shift2b, fellow2)

                    #team1:
                    if att1 not in list_atts:
                        list_atts.append(att1)
                        G.add_node(att1)
                        G.node[att1]["type"] = "A"
                    G.add_edge(shift2a, att1)
                    G.add_edge(shift2b, att1)

                    if fellow1 not in list_fellows:
                        list_fellows.append(fellow1)
                        G.add_node(fellow1)
                        G.node[fellow1]["type"] = "F"
                    G.add_edge(shift2a, fellow1)
                    G.add_edge(shift2b, fellow1)

                else:  # for most of the lines

                    #dates:                Week:
                    start_week = row[0]
                    part_times = start_week.split("/")
                    mm = int(part_times[0])
                    dd = int(part_times[1])
                    yy = 2000 + int(part_times[2])
                    date_start_week = datetime(yy, mm, dd)

                    end_week = row[
                        5]  # always CLOSED interval to describe the shifts
                    part_times = end_week.split("/")
                    mm = int(part_times[0])
                    dd = int(part_times[1])
                    yy = 2000 + int(part_times[2])
                    date_end_week = datetime(yy, mm, dd) - timedelta(days=1)

                    # Weekend:
                    date_start_weeked = datetime(yy, mm, dd)
                    date_end_weeked = datetime(yy, mm, dd) + timedelta(days=1)

                    #team1:
                    G.add_node(shift1)
                    G.node[shift1]["type"] = "shift"
                    G.node[shift1]["start"] = date_start_week
                    G.node[shift1]["stop"] = date_end_week
                    G.node[shift1]["order"] = cont_shifts
                    G.node[shift1]["team"] = 1

                    list_shifts.append(shift1)
                    cont_shifts += 1

                    if att1 not in list_atts:
                        list_atts.append(att1)
                        G.add_node(att1)
                        G.node[att1]["type"] = "A"
                    G.add_edge(shift1, att1)

                    if fellow1 not in list_fellows:
                        list_fellows.append(fellow1)
                        G.add_node(fellow1)
                        G.node[fellow1]["type"] = "F"
                    G.add_edge(shift1, fellow1)

                    #team2:
                    if att2 not in list_atts:
                        list_atts.append(att2)
                        G.add_node(att2)
                        G.node[att2]["type"] = "A"

                    shift2 = shift2 + " T2"
                    G.add_node(shift2)

                    G.node[shift2]["type"] = "shift"
                    G.node[shift2]["start"] = date_start_week
                    G.node[shift2]["stop"] = date_end_week
                    G.node[shift2]["order"] = cont_shifts
                    G.node[shift2]["team"] = 2

                    list_shifts.append(shift2)
                    cont_shifts += 1

                    G.add_edge(shift2, att2)

                    if fellow2 not in list_fellows:
                        list_fellows.append(fellow2)
                        G.add_node(fellow2)
                        G.node[fellow2]["type"] = "F"
                    G.add_edge(shift2, fellow2)

            #weekends:

                G.add_node(weekend_shift)

                end_week = row[5]
                part_times = end_week.split("/")
                mm = int(part_times[0])
                dd = int(part_times[1])
                yy = 2000 + int(part_times[2])

                date_start_weeked = datetime(yy, mm, dd)
                date_end_weeked = datetime(yy, mm, dd) + timedelta(days=1)

                if fellow_w not in list_fellows:
                    list_fellows.append(fellow_w)
                    G.add_node(fellow_w)
                    G.node[fellow_w]["type"] = "F"
                G.add_edge(weekend_shift, fellow_w)

                if att_w1 not in list_atts:
                    list_atts.append(att_w1)
                    G.add_node(att_w1)
                    G.node[att_w1]["type"] = "A"
                G.add_edge(weekend_shift, att_w1)

                if att_w2 not in list_atts:
                    list_atts.append(att_w2)
                    G.add_node(att_w2)
                    G.node[att_w2]["type"] = "A"
                G.add_edge(weekend_shift, att_w2)

                G.node[weekend_shift]["type"] = "shift"
                G.node[weekend_shift]["start"] = date_start_weeked
                G.node[weekend_shift]["stop"] = date_end_weeked
                G.node[weekend_shift]["order"] = cont_shifts
                G.node[weekend_shift]["team"] = "w"

                list_shifts.append(weekend_shift)
                cont_shifts += 1

            else:  # to deal with the first line, that only has weekend shift

                weekend_shift = str(row[5])
                att_w1 = str(row[6])
                att_w2 = str(row[7])
                fellow_w = str(row[8])

                G.add_node(weekend_shift)

                end_week = row[5]
                part_times = end_week.split("/")
                mm = int(part_times[0])
                dd = int(part_times[1])
                yy = 2000 + int(part_times[2])

                date_start_weeked = datetime(yy, mm, dd)
                date_end_weeked = datetime(yy, mm, dd) + timedelta(days=1)

                if fellow_w not in list_fellows:
                    list_fellows.append(fellow_w)
                    G.add_node(fellow_w)
                    G.node[fellow_w]["type"] = "F"
                G.add_edge(weekend_shift, fellow_w)

                if att_w1 not in list_atts:
                    list_atts.append(att_w1)
                    G.add_node(att_w1)
                    G.node[att_w1]["type"] = "A"
                G.add_edge(weekend_shift, att_w1)

                if att_w2 not in list_atts:
                    list_atts.append(att_w2)
                    G.add_node(att_w2)
                    G.node[att_w2]["type"] = "A"
                G.add_edge(weekend_shift, att_w2)

                G.node[weekend_shift]["type"] = "shift"
                G.node[weekend_shift]["start"] = date_start_weeked
                G.node[weekend_shift]["stop"] = date_end_weeked
                G.node[weekend_shift]["order"] = cont_shifts
                G.node[weekend_shift]["team"] = "w"

                list_shifts.append(weekend_shift)
                cont_shifts += 1

#NOTE: the few exceptions (long weekends) are NOT taken into account for now.

##################################
#writing the network into a file:#
##################################

    network_name = filename_AttgFellow.split("/")[-1]
    network_name = network_name.split(".csv")[0]
    nx.write_gml(G, "../Results/Doctors_Shifts_network.gml"
                 )  #  you run the code from  Idea-Spread-Hospital/Code

    for n in G.nodes():
        try:
            G.node[n]["stop"] = None
            G.node[n]["start"] = None

        except:
            pass

    nx.write_gml(G, "../Results/Doctors_Shifts_network_without_times.gml"
                 )  #  you run the code from  Idea-Spread-Hospital/Code

    num_A = 0
    num_F = 0
    num_s = 0
    for n in G.nodes():

        try:
            if G.node[n]["type"] == "F":
                num_F += 1
            elif G.node[n]["type"] == "A":
                num_A += 1

            else:  # for shift-like nodes
                num_s += 1

        except KeyError:
            print n

    print "total # of fellows:", num_F
    print "total # of attendings:", num_A
    print "total # shifts:", num_s

    print "total # nodes:", len(G.nodes())
Example #40
with open("{}.json".format(sys.argv[1])) as f:
    data = json.load(f)

timeseries_dict = {}
timeseries_list = []
final_dict = {}
country = ""

for i in range(len(data)):
    for year in years:
        if not data[i][str(year)]:
            pass
        else:
            timeseries_dict["value"] = float(data[i][str(year)])
        timeseries_dict["timestamp"] = float(
            datetime.timestamp(datetime(year, end_month, end_date)))
        timeseries_list.append(timeseries_dict)
        timeseries_dict = {}
    final_dict["data"] = timeseries_list
    country = data[i]["country"]
    if not os.path.exists(country):
        os.makedirs(country)
    with open('{}/{}.json'.format(country, data[i]["indicator"]),
              'w') as outfile:
        final_dict["forecast_to"] = 1767119400.0
        final_dict[
            "callback"] = "http://your.domain/yourcallback"  #this needs to be replaced as per web app
        json.dump(final_dict, outfile)
    timeseries_list = []
    final_dict = {}
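One caveat about the timestamp construction above: calling datetime.timestamp() on a naive datetime interprets it in the machine's local timezone, so the generated epoch values shift with the server's zone. A hedged sketch of pinning the conversion to UTC (my own note, not part of the snippet):

from datetime import datetime, timezone

naive = datetime(2020, 12, 31)                       # .timestamp() reads this as local time
aware = datetime(2020, 12, 31, tzinfo=timezone.utc)  # .timestamp() reads this as UTC

print(naive.timestamp())   # depends on the machine's timezone
print(aware.timestamp())   # always 1609372800.0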
Example #41
 def calcuskew(self):
     end = datetime.today()
     begin = datetime(2015, 2, 11)
     #begin = end - timedelta(10)
     day2 = []
     for i in range((end - begin).days + 1):
         day = begin + timedelta(days=i)
         day2.append(day)
     hisSKEW = []
     for date in day2:
         closePrice = pd.DataFrame(
             w.wsd("510050.SH", "close", date, date,
                   "TradingCalender=DCE").Data[0]).dropna()
         close = np.array(closePrice)
         if close == [[u'CWSDService: No data.']]:
             continue
         OPC3 = optionchoose3(close, date)
         option_year, option_month = OPC3.option_name()
         price = OPC3.mround()
         option_at_call = "50ETF购" + str(option_year) + "年" + str(
             option_month) + "月" + str('{:.2f}'.format(price))
         option_out_call = "50ETF购" + str(option_year) + "年" + str(
             option_month) + "月" + str('{:.2f}'.format(price + 0.05))
         w_etf_data = pd.DataFrame(
             w.wset('SectorConstituent',
                    u'date=' + str(date) + ';sector=华夏上证50ETF期权').Data)
         w_etf_data = w_etf_data.T
         try:
             at_call_Code = w_etf_data[w_etf_data[2] ==
                                       option_at_call][1].values[0]
             out_call_Code = w_etf_data[w_etf_data[2] ==
                                        option_out_call][1].values[0]
         except IndexError:
             try:
                 option_at_call = "50ETF购" + str(option_month) + "月" + str(
                     '{:.2f}'.format(price))
                 at_call_Code = w_etf_data[w_etf_data[2] ==
                                           option_at_call][1].values[0]
                 option_out_call = "50ETF购" + str(option_month) + "月" + str(
                     '{:.2f}'.format(price + 0.05))
                 out_call_Code = w_etf_data[w_etf_data[2] ==
                                            option_out_call][1].values[0]
             except IndexError:
                 continue
         his_atIVX = pd.DataFrame(
             w.wsd(at_call_Code, "us_impliedvol", date, date,
                   "TradingCalender=DCE").Data).dropna()
         his_outIVX = pd.DataFrame(
             w.wsd(out_call_Code, "us_impliedvol", date, date,
                   "TradingCalender=DCE").Data).dropna()
         try:
             his_skew = his_outIVX / his_atIVX
             his_skew = his_skew.values[0][0]
         except:
             continue
         hisSKEW.append(his_skew)
     per1, per2, per3, per4 = np.percentile(hisSKEW, 90), np.percentile(
         hisSKEW, 60), np.percentile(hisSKEW,
                                     40), np.percentile(hisSKEW, 10)
     print("etfskew percentile")
     print([per1, per2, per3, per4])
     return per1, per2, per3, per4
Example #42
        'forPlot': 'Webster, NY'
    }
}

# Go through each hour of each day and month
for month in range(2, 3):  # February only (range(2, 3) covers just month 2)
    if month == 1:
        minDate = 11
        maxDate = 31
    elif month == 2:
        minDate = 15
        maxDate = 18
    for day in range(minDate, maxDate + 1):
        for hour in range(0, 24):
            # Get date and time
            current_dt = datetime(2022, month, day, hour, 0)
            current_dt_string = datetime.strftime(current_dt, '%Y%m%d%H%M')
            current_date_string = datetime.strftime(current_dt, '%Y%m%d')
            current_hourMin_string = datetime.strftime(current_dt, '%H%M')
            yesterday_dt = current_dt - timedelta(days=1)
            yesterday_date_string = datetime.strftime(yesterday_dt, '%Y%m%d')
            date_list = [yesterday_date_string, current_date_string]

            print('datetime = {}'.format(current_dt_string))
            for station in station_dict.keys():
                print('Plotting and saving data for {} at {}.'.format(
                    station, current_dt_string))
                station_name_file = station_dict[station]['forFilename']
                station_name_plot = station_dict[station]['forPlot']

                # Create dataframe with datetime values as primary index
Example #43
#from datetime import date
#from datetime import time

from datetime import *

host_path = "C:\\Windows\\System32\\drivers\\etc"
redirect = "127.0.0.1"

website_list = ["www.facebook.com", "www.instagram.com"]

start_date = datetime(2020, 7, 29)
end_date = datetime(2020, 7, 30)
today_date = datetime(datetime.now().year,
                      datetime.now().month,
                      datetime.now().day)

while True:
    if start_date <= today_date < end_date:
        with open(host_path, "r+") as file:
            content = file.read()
            for site in website_list:
                if site in content:
                    pass
                else:
                    file.write(redirect + " " + site + "\n")
        print("All sites are blocked!")
        break
    else:  # today is outside the blocking window
        with open(host_path, "r+") as file:
            content = file.readlines()
            file.seek(0)
Example #44
meter_no = input_string.split()[0].replace('-', '')  # meter number
#print(meter)
read_time = f'{input_string.split()[1]} {input_string.split()[2]}'  #reading time

#global meter_no
# sql_1 = "SELECT CTratio, VTratio FROM meter_details WHERE meter_no=?",((meter,))
#c.execute("SELECT CTratios, VTratio FROM standalone_meter_details WHERE meter_no = %s", meter)
#data = c.fetchone()

date_pattern = '%{MONTHNUM:month}/%{MONTHDAY:day}/%{YEAR:year}'
grok = Grok(date_pattern)
xl = (grok.match(input_string))
mm = xl['month']
dd = xl['day']
yr = xl['year']
x = datetime(int(yr), int(mm), int(dd))
# ll = (x.strftime("%d-%m-%Y"))
D_O_R = (x.strftime("%d-%b-%Y"))  # reading date
list1 = []
list2 = []
list7 = []
for m in range(1, sheet.max_row + 1):
    for n in range(1, sheet.max_column + 1):
        #cell1 = sheet.cell(m,n)
        cell = sheet.cell(m, n).value
        cell_value = str(cell)
        if (cell_value.find('Historical data set') != -1):
            list1.append(cell_value)
            list2.append(m)
print(len(list1))
print(list2)
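For the date handling in this snippet, the grok pattern is only splitting the MM/DD/YYYY token, so an alternative (hypothetical, assuming the reading date is the second whitespace-separated field, as used for read_time above) is to let datetime.strptime parse and reformat it in one step:

from datetime import datetime

# Hypothetical shortcut for the grok-based extraction above.
date_token = input_string.split()[1]            # e.g. '2/15/2022'
x = datetime.strptime(date_token, "%m/%d/%Y")   # %m and %d also accept single-digit values
D_O_R = x.strftime("%d-%b-%Y")                  # reading date, e.g. '15-Feb-2022'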
Example #45
def JDS_file_reader(fileList, result_path, MaxNsp, spSkip, RFImeanConst, Vmin,
                    Vmax, VminNorm, VmaxNorm, VminCorrMag, VmaxCorrMag,
                    colormap, customDPI, CorrelationProcess, longFileSaveAch,
                    longFileSaveBch, longFileSaveCRI, longFileSaveCMP,
                    DynSpecSaveInitial, DynSpecSaveCleaned,
                    CorrSpecSaveInitial, CorrSpecSaveCleaned,
                    SpecterFileSaveSwitch, ImmediateSpNo):
    currentTime = time.strftime("%H:%M:%S")
    currentDate = time.strftime("%d.%m.%Y")

    # *** Creating a folder where all pictures and results will be stored (if it doesn't exist) ***
    if not os.path.exists(result_path):
        os.makedirs(result_path)
    if not os.path.exists(result_path + '/Service'):
        os.makedirs(result_path + '/Service')
    if DynSpecSaveInitial == 1:
        if not os.path.exists(result_path + '/Initial_spectra'):
            os.makedirs(result_path + '/Initial_spectra')
    if (DynSpecSaveCleaned == 1 and CorrelationProcess == 1):
        if not os.path.exists(result_path + '/Correlation_spectra'):
            os.makedirs(result_path + '/Correlation_spectra')

    # Main loop
    for fileNo in range(len(fileList)):  # loop by files
        print('\n\n\n  *  File ', str(fileNo + 1), ' of', str(len(fileList)))
        print('  *  File path: ', str(fileList[fileNo]))

        #*********************************************************************************

        # *** Opening datafile ***
        fname = ''
        if len(fname) < 1: fname = fileList[fileNo]

        # *** Data file header read ***
        [
            df_filename, df_filesize, df_system_name, df_obs_place,
            df_description, CLCfrq, df_creation_timeUTC, SpInFile,
            ReceiverMode, Mode, Navr, TimeRes, fmin, fmax, df, frequency,
            FreqPointsNum, dataBlockSize
        ] = FileHeaderReaderJDS(fname, 0, 0)

        # Initial time line settings
        TimeScaleStartDate = datetime(int(df_creation_timeUTC[0:4]),
                                      int(df_creation_timeUTC[5:7]),
                                      int(df_creation_timeUTC[8:10]), 0, 0, 0,
                                      0)

        timeLineMS = np.zeros(
            int(SpInFile))  # List of ms values from ends of spectra

        # *** Creating a name for long timeline TXT file ***
        if fileNo == 0 and (longFileSaveAch == 1 or longFileSaveBch == 1
                            or longFileSaveCRI == 1 or longFileSaveCMP == 1):
            TLfile_name = df_filename + '_Timeline.txt'
            TLfile = open(
                TLfile_name,
                'wb')  # Open and close to delete the file with the same name
            TLfile.close()

        with open(fname, 'rb') as file:

            # *** If it is the first file - write the header to long data file
            if ((longFileSaveAch == 1 or longFileSaveBch == 1
                 or longFileSaveCRI == 1 or longFileSaveCMP == 1)
                    and fileNo == 0):
                file.seek(0)
                file_header = file.read(1024)

                DAT_file_name = df_filename
                DAT_file_list = []

                # *** Creating a binary file with data for long data storage ***
                if ((Mode == 1 or Mode == 2) and longFileSaveAch == 1):
                    Data_A_name = df_filename + '_Data_chA.dat'
                    Data_AFile = open(Data_A_name, 'wb')
                    Data_AFile.write(file_header)
                    Data_AFile.close()
                    DAT_file_list.append('chA')
                if (longFileSaveBch == 1 and (Mode == 1 or Mode == 2)):
                    Data_B_name = df_filename + '_Data_chB.dat'
                    Data_BFile = open(Data_B_name, 'wb')
                    Data_BFile.write(file_header)
                    Data_BFile.close()
                    DAT_file_list.append('chB')
                if (longFileSaveCRI == 1 and CorrelationProcess == 1
                        and Mode == 2):
                    Data_CRe_name = df_filename + '_Data_CRe.dat'
                    Data_CReFile = open(Data_CRe_name, 'wb')
                    Data_CReFile.write(file_header)
                    Data_CReFile.close()
                    DAT_file_list.append('CRe')
                    Data_CIm_name = df_filename + '_Data_CIm.dat'
                    Data_CImFile = open(Data_CIm_name, 'wb')
                    Data_CImFile.write(file_header)
                    Data_CImFile.close()
                    DAT_file_list.append('CIm')
                if (longFileSaveCMP == 1 and CorrelationProcess == 1
                        and Mode == 2):
                    Data_Cm_name = df_filename + '_Data_C_m.dat'
                    Data_CmFile = open(Data_Cm_name, 'wb')
                    Data_CmFile.write(file_header)
                    Data_CmFile.close()
                    DAT_file_list.append('C_m')
                    Data_Cp_name = df_filename + '_Data_C_p.dat'
                    Data_CpFile = open(Data_Cp_name, 'wb')
                    Data_CpFile.write(file_header)
                    Data_CpFile.close()
                    DAT_file_list.append('C_p')

                del file_header

    #*******************************************************************************
    #                         R E A D I N G   D A T A                              *
    #*******************************************************************************
    #print ('\n  *** Reading data from file *** \n')
            file.seek(1024)  # Jumping to 1024 byte from file beginning
            if Mode == 0:
                print(
                    '\n\n  Data in waveform mode, use appropriate program!!! \n\n\n'
                )

            if Mode > 0 and Mode < 3:  # Spectra modes
                figID = -1
                figMAX = int(math.ceil((SpInFile - spSkip) / MaxNsp))
                if figMAX < 1: figMAX = 1
                for fig in range(figMAX):
                    Time1 = time.time()  # Timing
                    figID = figID + 1
                    currentTime = time.strftime("%H:%M:%S")
                    print(' File # ', str(fileNo + 1), ' of ',
                          str(len(fileList)), ', figure # ', figID + 1, ' of ',
                          figMAX, '   started at: ', currentTime)
                    if (SpInFile - spSkip - MaxNsp * figID) < MaxNsp:
                        Nsp = int(SpInFile - spSkip - MaxNsp * figID)
                    else:
                        Nsp = MaxNsp

                    # *** Preparing empty matrices ***
                    if Mode == 1 or Mode == 2:
                        Data_ChA = np.zeros((Nsp, FreqPointsNum))

                    if Mode == 1 or Mode == 2:
                        Data_ChB = np.zeros((Nsp, FreqPointsNum))

                    if Mode == 2:
                        Data_CRe = np.zeros((Nsp, FreqPointsNum))
                        Data_CIm = np.zeros((Nsp, FreqPointsNum))
                        CorrModule = np.zeros((Nsp, FreqPointsNum))
                        CorrPhase = np.zeros((Nsp, FreqPointsNum))

                    # *** Reading and reshaping all data for figure ***
                    if Mode == 1:
                        raw = np.fromfile(file,
                                          dtype='u4',
                                          count=(2 * Nsp * FreqPointsNum))
                        raw = np.reshape(raw, [2 * FreqPointsNum, Nsp],
                                         order='F')
                        Data_ChA = raw[0:(FreqPointsNum * 2):2, :].transpose()
                        Data_ChB = raw[1:(FreqPointsNum * 2):2, :].transpose()

                    if Mode == 2:
                        raw = np.fromfile(file,
                                          dtype='u4',
                                          count=(4 * Nsp * FreqPointsNum))
                        raw = np.reshape(raw, [4 * FreqPointsNum, Nsp],
                                         order='F')
                        Data_ChA = raw[0:(FreqPointsNum * 4):4, :].transpose()
                        Data_ChB = raw[1:(FreqPointsNum * 4):4, :].transpose()
                        Data_CRe = raw[2:(FreqPointsNum * 4):4, :].transpose()
                        Data_CIm = raw[3:(FreqPointsNum * 4):4, :].transpose()

                    del raw

                    # *** Single out timing from data ***
                    counterA2 = np.uint64(Data_ChA[:, -1])
                    counterB2 = np.uint64(Data_ChB[:, -1])
                    counterA1 = np.uint64(Data_ChA[:, -2])
                    counterB1 = np.uint64(Data_ChB[:, -2])

                    A = np.uint64(int('01111111111111111111111111111111', 2))
                    msCount = np.uint32(np.bitwise_and(
                        counterB2, A))  # number of ms since record started
                    ftCount = np.uint32(np.bitwise_and(
                        counterA2,
                        A))  # number of specter since record started

                    A = np.uint64(int('00000111111111111111111111111111', 2))
                    phaOfSec = np.uint32(np.bitwise_and(
                        counterA1, A))  # phase of second for the spectr
                    A = np.uint64(int('00000000000000011111111111111111', 2))
                    secOfDay = np.uint32(np.bitwise_and(
                        counterB1, A))  # second of the day for the specter
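                    # The binary masks above keep the low 31 bits (0x7FFFFFFF),
                    # 27 bits (0x07FFFFFF) and 17 bits (0x1FFFF) of the 32-bit
                    # counters, so the millisecond, spectrum, phase-of-second and
                    # second-of-day counters are unpacked from the last two
                    # samples of channels A and B.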

                    # *** Time line arranging ***

                    # Preparing/cleaning matrices for time scales
                    TimeScale = []  # New for each file
                    TimeFigureScale = []  # Timeline (new) for each figure (Nsp)
                    # Calculations
                    FigStartTime = timedelta(
                        0, int(secOfDay[0]),
                        int(1000000 * phaOfSec[0] / CLCfrq))
                    for i in range(Nsp):
                        TimeAdd = timedelta(
                            0, int(secOfDay[i]),
                            int(1000000 * phaOfSec[i] / CLCfrq))
                        TimeScale.append(str(str(TimeScaleStartDate +
                                                 TimeAdd)))
                        TimeFigureScale.append(str((TimeAdd - FigStartTime)))

                    TimeFigureScaleFig = np.empty_like(TimeFigureScale)
                    TimeScaleFig = np.empty_like(TimeScale)
                    for i in range(len(TimeFigureScale)):
                        TimeFigureScaleFig[i] = TimeFigureScale[i][0:11]
                        TimeScaleFig[i] = TimeScale[i][11:23]

                    # *** Converting from FPGA to PC float format ***
                    if Mode == 1 or Mode == 2:
                        Data_ChA = FPGAtoPCarrayJDS(Data_ChA, Navr)
                        Data_ChB = FPGAtoPCarrayJDS(Data_ChB, Navr)
                    if (Mode == 2 and CorrelationProcess == 1):
                        Data_CRe = FPGAtoPCarrayJDS(Data_CRe, Navr)
                        Data_CIm = FPGAtoPCarrayJDS(Data_CIm, Navr)
                    '''
                    # *** Absolute correlation specter plot ***
                    if Mode == 2 and figID == 0:   #  Immediate correlation spectrum channels A & B
                        TwoImmedSpectraPlot(frequency, Data_CRe[1][:], Data_CIm[1][:], 'Channel A', 'Channel B',
                                            frequency[0], frequency[FreqPointsNum-1], -0.001, 0.001,
                                            'Frequency, MHz', 'Amplitude, dB',
                                            'Immediate spectrum '+str(df_filename[0:18])+ ' channels A & B',
                                            'Initial parameters: dt = '+str(round(TimeRes,3))+' Sec, df = '+str(round(df/1000,3))+' kHz',
                                            'JDS_Results/Service/'+df_filename[0:14]+' Correlation Spectrum Re and Im before log.png')
                    '''

                    # *** Saving data to a long-term file ***
                    if (Mode == 1 or Mode == 2) and longFileSaveAch == 1:
                        Data_AFile = open(Data_A_name, 'ab')
                        Data_AFile.write(Data_ChA)
                        Data_AFile.close()
                    if (Mode == 1 or Mode == 2) and longFileSaveBch == 1:
                        Data_BFile = open(Data_B_name, 'ab')
                        Data_BFile.write(Data_ChB)
                        Data_BFile.close()
                    if Mode == 2 and longFileSaveCRI == 1 and CorrelationProcess == 1:
                        Data_CReFile = open(Data_CRe_name, 'ab')
                        Data_CReFile.write(np.float64(Data_CRe))
                        Data_CReFile.close()
                        Data_CImFile = open(Data_CIm_name, 'ab')
                        Data_CImFile.write(np.float64(Data_CIm))
                        Data_CImFile.close()

                    if (longFileSaveAch == 1 or longFileSaveBch == 1
                            or longFileSaveCRI == 1 or longFileSaveCMP == 1):
                        with open(TLfile_name, 'a') as TLfile:
                            for i in range(Nsp):
                                TLfile.write(
                                    (TimeScale[i][:] + ' \n'))  #str.encode

                    # *** Converting to logarithmic scale matrices ***
                    if (Mode == 1 or Mode == 2):
                        with np.errstate(invalid='ignore'):
                            Data_ChA = 10 * np.log10(Data_ChA)
                            Data_ChB = 10 * np.log10(Data_ChB)
                        Data_ChA[np.isnan(Data_ChA)] = -120
                        Data_ChB[np.isnan(Data_ChB)] = -120
                    if (Mode == 2 and CorrelationProcess == 1):
                        with np.errstate(invalid='ignore', divide='ignore'):
                            CorrModule = 10 * np.log10(
                                ((Data_CRe)**2 + (Data_CIm)**2)**(0.5))
                            CorrPhase = np.arctan2(Data_CIm, Data_CRe)
                        CorrPhase[np.isnan(CorrPhase)] = 0
                        CorrModule[np.isinf(CorrModule)] = -135.5

                    # *** Saving correlation data to a long-term module and phase files ***
                    if (Mode == 2 and CorrelationProcess == 1
                            and longFileSaveCMP == 1):
                        Data_CmFile = open(Data_Cm_name, 'ab')
                        Data_CmFile.write(np.float64(CorrModule))
                        Data_CmFile.close()
                        Data_CpFile = open(Data_Cp_name, 'ab')
                        Data_CpFile.write(np.float64(CorrPhase))
                        Data_CpFile.close()

                    # *** Saving immediate spectrum to file ***
                    if (SpecterFileSaveSwitch == 1 and figID == 0):
                        SpFile = open(
                            'JDS_Results/Service/Specter_' +
                            df_filename[0:14] + '.txt', 'w')
                        for i in range(FreqPointsNum - 1):
                            if Mode == 1:
                                SpFile.write(
                                    str('{:10.6f}'.format(frequency[i])) +
                                    '  ' + str('{:16.10f}'.format(
                                        Data_ChA[ImmediateSpNo][i])) + '  ' +
                                    str('{:16.10f}'.format(
                                        Data_ChB[ImmediateSpNo][i])) + ' \n')
                            if Mode == 2:
                                SpFile.write(
                                    str(frequency[i]) + '  ' +
                                    str(Data_ChA[ImmediateSpNo][i]) + '  ' +
                                    str(Data_ChB[ImmediateSpNo][i]) + '  ' +
                                    str(Data_CRe[ImmediateSpNo][i]) + '  ' +
                                    str(Data_CIm[ImmediateSpNo][i]) + ' \n')

                        SpFile.close()

    #*******************************************************************************
    #                                  F I G U R E S                               *
    #*******************************************************************************

    # *** Plotting immediate spectra before cleaning and normalizing ***
                    if (Mode == 1 or Mode == 2) and figID == 0:

                        Suptitle = ('Immediate spectrum ' +
                                    str(df_filename[0:18]) + ' channels A & B')
                        Title = ('Place: ' + str(df_obs_place) +
                                 ', Receiver: ' + str(df_system_name) +
                                 '. Initial parameters: dt = ' +
                                 str(round(TimeRes, 3)) + ' Sec, df = ' +
                                 str(round(df / 1000, 3)) + ' kHz ' +
                                 'Description: ' + str(df_description))
                        Filename = (
                            result_path + '/Service/' + df_filename[0:14] +
                            ' Channels A and B Immediate Spectrum before cleaning and normalizing.png'
                        )

                        TwoOrOneValuePlot(
                            2, frequency, Data_ChA[0][:], Data_ChB[0][:],
                            'Channel A', 'Channel B', frequency[0],
                            frequency[FreqPointsNum - 1], -120, -20, -120, -20,
                            'Frequency, MHz', 'Intensity, dB', 'Intensity, dB',
                            Suptitle, Title, Filename, currentDate,
                            currentTime, Software_version)

                    if Mode == 2 and CorrelationProcess == 1 and figID == 0:

                        Suptitle = ('Immediate correlation spectrum ' +
                                    str(df_filename[0:18]) + ' channels A & B')
                        Title = ('Place: ' + str(df_obs_place) +
                                 ', Receiver: ' + str(df_system_name) +
                                 '. Initial parameters: dt = ' +
                                 str(round(TimeRes, 3)) + ' Sec, df = ' +
                                 str(round(df / 1000, 3)) + ' kHz ' +
                                 'Description: ' + str(df_description))
                        Filename = (
                            result_path + '/Service/' + df_filename[0:14] +
                            ' Channels A and B Correlation Immediate Spectrum before cleaning and normalizing.png'
                        )

                        TwoOrOneValuePlot(
                            2, frequency, CorrModule[0][:], CorrPhase[0][:],
                            'Correlation module', 'Correlation phase',
                            frequency[0], frequency[FreqPointsNum - 1],
                            VminCorrMag, VmaxCorrMag, -4, 4, 'Frequency, MHz',
                            'Amplitude, dB', 'Phase, deg', Suptitle, Title,
                            Filename, currentDate, currentTime,
                            Software_version)

                    # *** FIGURE Initial dynamic spectrum channels A and B ***
                    if (Mode == 1 or Mode == 2) and DynSpecSaveInitial == 1:

                        Suptitle = ('Dynamic spectrum (initial) ' +
                                    str(df_filename) + ' - Fig. ' +
                                    str(figID + 1) + ' of ' + str(figMAX) +
                                    '\n Initial parameters: dt = ' +
                                    str(round(TimeRes * 1000, 3)) +
                                    ' ms, df = ' + str(round(df / 1000., 3)) +
                                    ' kHz, Receiver: ' + str(df_system_name) +
                                    ', Place: ' + str(df_obs_place) + '\n' +
                                    ReceiverMode + ', Description: ' +
                                    str(df_description))

                        fig_file_name = (result_path + '/Initial_spectra/' +
                                         df_filename[0:14] +
                                         ' Initial dynamic spectrum fig.' +
                                         str(figID + 1) + '.png')

                        TwoDynSpectraPlot(Data_ChA.transpose(),
                                          Data_ChB.transpose(), Vmin, Vmax,
                                          Vmin, Vmax, Suptitle,
                                          'Intensity, dB', 'Intensity, dB',
                                          Nsp, TimeFigureScaleFig,
                                          TimeScaleFig, frequency,
                                          FreqPointsNum, colormap, 'Channel A',
                                          'Channel B', fig_file_name,
                                          currentDate, currentTime,
                                          Software_version, customDPI)

                    # *** FIGURE Initial correlation spectrum Module and Phase (python 3 new version) ***
                    if (Mode == 2 and CorrSpecSaveInitial == 1
                            and CorrelationProcess == 1):

                        Suptitle = ('Correlation dynamic spectrum (initial) ' +
                                    str(df_filename) + ' - Fig. ' +
                                    str(figID + 1) + ' of ' + str(figMAX) +
                                    '\n Initial parameters: dt = ' +
                                    str(round(TimeRes * 1000, 3)) +
                                    ' ms, df = ' + str(round(df / 1000., 3)) +
                                    ' kHz, Receiver: ' + str(df_system_name) +
                                    ', Place: ' + str(df_obs_place) + '\n' +
                                    ReceiverMode + ', Description: ' +
                                    str(df_description))

                        fig_file_name = (result_path +
                                         '/Correlation_spectra/' +
                                         df_filename[0:14] +
                                         ' Correlation dynamic spectrum fig.' +
                                         str(figID + 1) + '.png')

                        TwoDynSpectraPlot(CorrModule.transpose(),
                                          CorrPhase.transpose(), VminCorrMag,
                                          VmaxCorrMag, -3.15, 3.15, Suptitle,
                                          'Intensity, dB', 'Phase, rad', Nsp,
                                          TimeFigureScaleFig, TimeScaleFig,
                                          frequency, FreqPointsNum, colormap,
                                          'Correlation module',
                                          'Correlation phase', fig_file_name,
                                          currentDate, currentTime,
                                          Software_version, customDPI)

                    # *** Normalizing amplitude-frequency response ***
                    if Mode == 1 or Mode == 2:
                        Normalization_dB(Data_ChA, FreqPointsNum, Nsp)
                        Normalization_dB(Data_ChB, FreqPointsNum, Nsp)
                    if Mode == 2 and CorrelationProcess == 1 and CorrSpecSaveCleaned == 1:
                        Normalization_dB(CorrModule, FreqPointsNum, Nsp)

                    # *** Deleting channels with strong RFI ***
                    if Mode == 1 or Mode == 2:
                        simple_channel_clean(Data_ChA, RFImeanConst)
                        simple_channel_clean(Data_ChB, RFImeanConst)
                    if Mode == 2 and CorrelationProcess == 1 and CorrSpecSaveCleaned == 1:
                        simple_channel_clean(CorrModule, 2 * RFImeanConst)

                    #   *** Immediate spectra ***    (only for first figure in data file)
                    if (Mode == 1 or Mode == 2
                        ) and figID == 0:  # Immediate spectrum channels A & B

                        Suptitle = (
                            'Cleaned and normalized immediate spectrum ' +
                            str(df_filename[0:18]) + ' channels A & B')
                        Title = ('Place: ' + str(df_obs_place) +
                                 ', Receiver: ' + str(df_system_name) +
                                 '. Initial parameters: dt = ' +
                                 str(round(TimeRes, 3)) + ' Sec, df = ' +
                                 str(round(df / 1000, 3)) + ' kHz ' +
                                 'Description: ' + str(df_description))
                        Filename = (
                            result_path + '/Service/' + df_filename[0:14] +
                            ' Channels A and B Immediate Spectrum after cleaning and normalizing.png'
                        )

                        TwoOrOneValuePlot(
                            2, frequency, Data_ChA[1][:], Data_ChB[1][:],
                            'Channel A', 'Channel B', frequency[0],
                            frequency[FreqPointsNum - 1], VminNorm - 5,
                            VmaxNorm, VminNorm - 5, VmaxNorm, 'Frequency, MHz',
                            'Intensity, dB', 'Intensity, dB', Suptitle, Title,
                            Filename, currentDate, currentTime,
                            Software_version)

                    # *** FIGURE Normalized dynamic spectrum channels A and B ***
                    if (Mode == 1 or Mode == 2) and DynSpecSaveCleaned == 1:

                        Suptitle = ('Dynamic spectrum (normalized) ' +
                                    str(df_filename) + ' - Fig. ' +
                                    str(figID + 1) + ' of ' + str(figMAX) +
                                    '\n Initial parameters: dt = ' +
                                    str(round(TimeRes * 1000, 3)) +
                                    ' ms, df = ' + str(round(df / 1000., 3)) +
                                    ' kHz, Receiver: ' + str(df_system_name) +
                                    ', Place: ' + str(df_obs_place) + '\n' +
                                    ReceiverMode + ', Description: ' +
                                    str(df_description))

                        fig_file_name = (result_path + '/' +
                                         df_filename[0:14] +
                                         ' Dynamic spectra fig.' +
                                         str(figID + 1) + '.png')

                        TwoDynSpectraPlot(
                            Data_ChA.transpose(), Data_ChB.transpose(),
                            VminNorm, VmaxNorm, VminNorm, VmaxNorm, Suptitle,
                            'Intensity, dB', 'Intensity, dB', Nsp,
                            TimeFigureScaleFig, TimeScaleFig, frequency,
                            FreqPointsNum, colormap, 'Channel A', 'Channel B',
                            fig_file_name, currentDate, currentTime,
                            Software_version, customDPI)

                    # *** FIGURE Normalized correlation spectrum Module and Phase ***
                    if (Mode == 2 and CorrSpecSaveCleaned == 1
                            and CorrelationProcess == 1):

                        Suptitle = (
                            'Correlation dynamic spectrum (normalized) ' +
                            str(df_filename) + ' - Fig. ' + str(figID + 1) +
                            ' of ' + str(figMAX) +
                            '\n Initial parameters: dt = ' +
                            str(round(TimeRes * 1000, 3)) + ' ms, df = ' +
                            str(round(df / 1000., 3)) + ' kHz, Receiver: ' +
                            str(df_system_name) + ', Place: ' +
                            str(df_obs_place) + '\n' + ReceiverMode +
                            ', Description: ' + str(df_description))

                        fig_file_name = (
                            result_path + '/Correlation_spectra/' +
                            df_filename[0:14] +
                            ' Correlation dynamic spectra cleaned fig.' +
                            str(figID + 1) + '.png')
                        TwoDynSpectraPlot(CorrModule.transpose(),
                                          CorrPhase.transpose(), 2 * VminNorm,
                                          2 * VmaxNorm, -3.15, 3.15, Suptitle,
                                          'Intensity, dB', 'Phase, rad', Nsp,
                                          TimeFigureScaleFig, TimeScaleFig,
                                          frequency, FreqPointsNum, colormap,
                                          'Normalized correlation module',
                                          'Correlation phase', fig_file_name,
                                          currentDate, currentTime,
                                          Software_version, customDPI)
                '''
                # Check of second counter data for linearity
                OneImmedSpecterPlot(list(range(ChunksInFile)), timeLineSecond, 'timeLineSecond',
                                    0, ChunksInFile, 0, 2000,
                                    'Time, sec', 'Second counter, sec',
                                    'Second counter',
                                    ' ',
                                    'ADR_Results/Service/' + df_filename[0:14] + ' Second counter fig.' + str(figID+1) + '.png')

                '''

                gc.collect()

            #print ('\n  Position in file: ', file.tell(), ' File size: ', df_filesize)
            #if (file.tell() == df_filesize): print ('\n  File was read till the end \n')
            if (file.tell() < df_filesize):
                print('    The difference is ', (df_filesize - file.tell()),
                      ' bytes')
                print('\n  File was NOT read till the end!!! ERROR')

        file.close()  #Here we close the data file

    ok = 1
    return ok, DAT_file_name, DAT_file_list
Example #46

import json
import sys
from datetime import datetime

end_month = 12
end_date = 31

with open("{}.json".format(sys.argv[1])) as f:
    data = json.load(f)

timeseries_dict = {}
timeseries_list = []
final_dict = {}

for item in data:
    timeseries_dict["value"] = float(item["value"])
    timeseries_dict["timestamp"] = float(
        datetime.timestamp(datetime(int(item["year"]), end_month, end_date)))
    timeseries_list.append(timeseries_dict)
    timeseries_dict = {}

final_dict["data"] = timeseries_list
with open("{}.json".format(sys.argv[1]), 'w') as outfile:
    final_dict["forecast_to"] = 1767119400.0
    final_dict[
        "callback"] = "http://your.domain/yourcallback"  #this needs to be replaced as per web app
    json.dump(final_dict, outfile)
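
# The rewritten JSON then has the shape:
# {"data": [{"value": ..., "timestamp": ...}, ...],
#  "forecast_to": 1767119400.0, "callback": "http://your.domain/yourcallback"}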
# We need a while loop to make our script run every 5 seconds.

while True:
    ''' Determining the time
To build the desired Python script we need to check the current time and decide whether it is working time or fun time, since the application blocks website access only during working time.

To check the current time we use the datetime module: we test whether datetime.now() is greater than the datetime object for the start of the working day (7 AM in this script) and less than the datetime object for 5 PM of the current date.

The open() call opens the file stored as host_path in r+ mode. First we read the whole content of the file with read() and store it in a variable named content.

The for loop iterates over the website list (websites) and checks, for each item of the list, whether it is already present in content.

If it is present in the hosts file content we simply pass. Otherwise we write the redirect-website mapping to the hosts file so that the website hostname is redirected to localhost.
'''
    if datetime(datetime.now().year,
                datetime.now().month,
                datetime.now().day, 7) < datetime.now() < datetime(
                    datetime.now().year,
                    datetime.now().month,
                    datetime.now().day, 17):
        print("Working Hours")
        with open(host_path, "r+") as fileptr:
            content = fileptr.read()
            for website in websites:
                if website in content:
                    pass
                else:
                    fileptr.write(redirect + " " + website + "\n")
    else:
        ''' Removing from the hosts file
Our script works fine during working hours; now let's add behaviour for the fun hours as well. During fun hours (outside working hours) we must remove the added lines from the hosts file so that access to the blocked websites is granted again.
Example #48
    def _get_attendance_duration(self, cr, uid, ids, field_name, arg, context=None):
        res = {}
        contract_pool = self.pool.get('hr.contract')
        attendance_pool = self.pool.get('resource.calendar.attendance')
        precision = self.pool.get('res.users').browse(cr, uid, uid).company_id.working_time_precision
        # 2012.10.16 LF FIX : Get timezone from context
        active_tz = pytz.timezone(context and context.get("tz","Asia/Saigon") or "Asia/Saigon")
        str_now = datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S')
        for attendance_id in ids:
            duration = 0.0
            attendance = self.browse(cr, uid, attendance_id)
            res[attendance.id] = {
                                  #Thanh: Add new fields
                                  'calendar_duration': False,
                                  'available_late': False,
                                  'leave_soon': False,
                                  'calendar_attendance_id': False,
                                  'notes': False}
            # 2012.10.16 LF FIX : Attendance in context timezone
            attendance_start = datetime.strptime(
                attendance.name, '%Y-%m-%d %H:%M:%S'
                ).replace(tzinfo=pytz.utc).astimezone(active_tz)
            next_attendance_date = str_now
            next_attendance_ids = False
            # should we compute for sign out too?
            if attendance.action == 'sign_in':
                next_attendance_ids = self.search(cr, uid, [
                    ('employee_id', '=', attendance.employee_id.id),
                    ('name', '>', attendance.name)], order='name')
                if next_attendance_ids:
                    next_attendance = self.browse(cr, uid, next_attendance_ids[0])
                    if next_attendance.action == 'sign_in':
                        # 2012.10.16 LF FIX : Attendance in context timezone
                        raise UserError(_('Error'), _(
                           'Incongruent data: sign-in %s is followed by another sign-in'
                           ) % attendance_start)
                    next_attendance_date = next_attendance.name
                # 2012.10.16 LF FIX : Attendance in context timezone
                attendance_stop = datetime.strptime(
                    next_attendance_date, '%Y-%m-%d %H:%M:%S'
                    ).replace(tzinfo=pytz.utc).astimezone(active_tz)
                duration_delta = attendance_stop - attendance_start
                duration = self.total_seconds(duration_delta) / 60.0 / 60.0
                duration = round(duration / precision) * precision
            res[attendance.id]['duration'] = duration
            res[attendance.id]['end_datetime'] = next_attendance_date
            # If contract is not specified: working days = 24/7
            res[attendance.id]['inside_calendar_duration'] = duration
            res[attendance.id]['outside_calendar_duration'] = 0.0

            active_contract_ids = self.get_active_contracts(
                    cr, uid, ids, attendance.employee_id.id, date=str_now[:10])

            if active_contract_ids and active_contract_ids != 'error' and next_attendance_ids:
                contract = contract_pool.browse(cr, uid, active_contract_ids[0])
                if contract.working_hours:
                    # TODO: apply rounding or tolerance first?
                    if contract.working_hours.attendance_rounding:
                        float_attendance_rounding = float(contract.working_hours.attendance_rounding)
                        rounded_start_hour = self._ceil_rounding(
                            float_attendance_rounding, attendance_start)
                        rounded_stop_hour = self._floor_rounding(
                            float_attendance_rounding, attendance_stop)
                             
                        if abs(1- rounded_start_hour) < 0.01: # if shift == 1 hour
                            attendance_start = datetime(attendance_start.year, attendance_start.month,
                                attendance_start.day, attendance_start.hour + 1)
                        else:
                            attendance_start = datetime(attendance_start.year, attendance_start.month,
                                attendance_start.day, attendance_start.hour, int(round(rounded_start_hour * 60.0)))
                                 
                        attendance_stop = datetime(attendance_stop.year, attendance_stop.month,
                            attendance_stop.day, attendance_stop.hour,
                            int(round(rounded_stop_hour * 60.0)))
                         
                        # again
                        duration_delta = attendance_stop - attendance_start
                        duration = self.total_seconds(duration_delta) / 60.0 / 60.0
                        duration = round(duration / precision) * precision
                        res[attendance.id]['duration'] = duration
                         
                    res[attendance.id]['inside_calendar_duration'] = 0.0
                    res[attendance.id]['outside_calendar_duration'] = 0.0
                    calendar_id = contract.working_hours.id
                    intervals_within = 0
 
                    # split attendance in intervals = precision
                    # 2012.10.16 LF FIX : no recursion in split attendance
                    splitted_attendances = self._split_no_recursive_attendance(
                        attendance_start, duration, precision)
                    counter = 0
                    #Thanh: Get first Set
                    is_set = False
                    is_ava_late = False
                    is_le_soon = False
                    for atomic_attendance in splitted_attendances:
                        counter += 1
                        centered_attendance = atomic_attendance[0] + timedelta(
                            0,0,0,0,0, atomic_attendance[1] / 2.0)
                        centered_attendance_hour = (
                            centered_attendance.hour + centered_attendance.minute / 60.0
                            + centered_attendance.second / 60.0 / 60.0
                            )
                        # check if centered_attendance is within a working schedule                        
                        # 2012.10.16 LF FIX : weekday must be single character not int
                        weekday_char = str(unichr(centered_attendance.weekday() + 48))
                        matched_schedule_ids = attendance_pool.search(cr, uid, [
                            '&',
                            '|',
                            ('date_from', '=', False),
                            ('date_from','<=',centered_attendance.date()),
                            '|',
                            ('dayofweek', '=', False),
                            ('dayofweek','=',weekday_char),
                            ('calendar_id','=',calendar_id),
                            ('hour_to','>=',centered_attendance_hour),
                            ('hour_from','<=',centered_attendance_hour),
                            ])
                        if len(matched_schedule_ids) > 1:
                            raise UserError(_('Error'),
                                _('Wrongly configured working schedule with id %s') % str(calendar_id))
                             
                        if matched_schedule_ids:
                            calendar_attendance = attendance_pool.browse(cr, uid, matched_schedule_ids[0])
                            res[attendance.id]['calendar_attendance_id'] = calendar_attendance.id
                            #Thanh: Update Schedule Hours
                            if not is_set:
                                is_set = True
                                res[attendance.id]['calendar_duration'] = self.time_difference(
                                            calendar_attendance.hour_from,
                                            calendar_attendance.hour_to) - calendar_attendance.calendar_id.break_hours
                            #---
                            a = res[attendance.id]['duration']
                            #---
                            intervals_within += 1
                            # sign in tolerance
                            if intervals_within == 1:
#                                 calendar_attendance = attendance_pool.browse(cr, uid, matched_schedule_ids[0])
                                attendance_start_hour = (
                                    attendance_start.hour + attendance_start.minute / 60.0
                                    + attendance_start.second / 60.0 / 60.0
                                    )
                                attendance_stop_hour = (
                                    attendance_stop.hour + attendance_stop.minute / 60.0
                                    + attendance_stop.second / 60.0 / 60.0
                                    )
                            
#                                 if attendance_start_hour >= (
#                                     calendar_attendance.hour_from and
#                                     (attendance_start_hour - (calendar_attendance.hour_from +
#                                     calendar_attendance.tolerance_to)) < 0.01
#                                     ): # handling float roundings (<=)
                                if attendance_start_hour >= (calendar_attendance.hour_from + calendar_attendance.tolerance_to):
                                    
                                    additional_intervals = round(
                                        (attendance_start_hour - calendar_attendance.hour_from) / precision)
                                    
                                    intervals_within = additional_intervals
                                    
#                                     res[attendance.id]['duration'] = self.time_sum(
#                                         res[attendance.id]['duration'], attendance_start_hour - calendar_attendance.hour_from)
                                    res[attendance.id]['duration'] = (attendance_stop_hour - attendance_start_hour) - calendar_attendance.calendar_id.break_hours
                                    #Thanh: Check Available Late
                                    if attendance_start_hour > (calendar_attendance.hour_from + calendar_attendance.tolerance_to) and not is_ava_late:
                                        is_ava_late = True
                                        res[attendance.id]['available_late'] = True
                                        res[attendance.id]['color'] = 'yellow'
                                if attendance_start_hour < calendar_attendance.hour_from:
                                    res[attendance.id]['duration'] = res[attendance.id]['duration'] - (calendar_attendance.hour_from - attendance_start_hour) - calendar_attendance.calendar_id.break_hours
                            
                                    
                            # sign out tolerance
                            if len(splitted_attendances) == counter:
                                attendance_stop_hour = (
                                    attendance_stop.hour + attendance_stop.minute / 60.0
                                    + attendance_stop.second / 60.0 / 60.0
                                    )
#                                 calendar_attendance = attendance_pool.browse(cr, uid, matched_schedule_ids[0])
#                                 if attendance_stop_hour <= (
#                                     calendar_attendance.hour_to and
#                                     (attendance_stop_hour - (calendar_attendance.hour_to -
#                                     calendar_attendance.tolerance_from)) > -0.01
#                                     ): # handling float roundings (>=)
                                #Thanh: Change
                                if attendance_stop_hour <= (calendar_attendance.hour_to - calendar_attendance.tolerance_from):
                                    additional_intervals = round(
                                        (calendar_attendance.hour_to - attendance_stop_hour) / precision)
                                    intervals_within += additional_intervals
#                                     res[attendance.id]['duration'] = self.time_sum(
#                                         res[attendance.id]['duration'], additional_intervals * precision)
                                      
                                    #Thanh: Check Leave Soon
                                    if attendance_stop_hour < (calendar_attendance.hour_to - calendar_attendance.tolerance_from) and not is_le_soon:
                                        is_le_soon = True
                                        res[attendance.id]['leave_soon'] = True

                    res[attendance.id]['inside_calendar_duration'] = intervals_within * precision
                    # make difference using time in order to avoid rounding errors
                    # inside_calendar_duration can't be > duration
                    res[attendance.id]['outside_calendar_duration'] = self.time_difference(
                        res[attendance.id]['calendar_duration'],
                        res[attendance.id]['duration'])
 
                    if contract.working_hours.overtime_rounding:
                        if res[attendance.id]['outside_calendar_duration']:
                            overtime = res[attendance.id]['outside_calendar_duration']
                            if contract.working_hours.overtime_rounding_tolerance:
                                overtime = self.time_sum(overtime,
                                    contract.working_hours.overtime_rounding_tolerance)
                            float_overtime_rounding = float(contract.working_hours.overtime_rounding)
                            res[attendance.id]['outside_calendar_duration'] = math.floor(
                                overtime * float_overtime_rounding) / float_overtime_rounding
                    #Ringier: duration compare with inside_calendar_duration
                    if res[attendance.id]['calendar_duration'] > res[attendance.id]['duration']:
#                         hr_contract_pool = self.pool.get('hr.contract')
#                         hr_contract_ids = hr_contract_pool.search(cr, uid, [('employee_id', '=', res[attendance.id]['employee_id'].id)])
#                         hr_contract_obj = hr_contract_pool.browse(cr, uid, hr_contract_ids[0])
#                         if hr_contract_obj.no_attendance:
#                             cr.execute("""UPDATE hr_attendance SET color = 'white', notes = 'Not Enough Time' WHERE id = '%s' AND attendance_once_sign is not TRUE"""%(attendance.id))
                        cr.execute("""UPDATE hr_attendance SET color = 'yellow', notes = 'Not enough working hours' WHERE id = '%s' AND attendance_once_sign is not TRUE"""%(attendance.id))
                    if res[attendance.id]['available_late'] == True:
                        if res[attendance.id]['calendar_duration'] > res[attendance.id]['duration']:
                            cr.execute("""UPDATE hr_attendance SET color = 'yellow', notes = 'Late and Not enough working hours ' WHERE id = '%s' AND attendance_once_sign is not TRUE"""%(attendance.id))
                        else:
                            cr.execute("""UPDATE hr_attendance SET color = 'yellow', notes = 'Late' WHERE id = '%s' AND attendance_once_sign is not TRUE"""%(attendance.id))
                        
        return res
Example #49
hs90 = np.nanpercentile(hsCombined, 90)
hs85 = np.nanpercentile(hsCombined, 85)

years = np.arange(1979, 2019)
winterHs = np.empty(len(years))
summerHs = np.empty(len(years))
summerTime = []
winterTime = []
winterHs90 = np.empty(len(years))
summerHs90 = np.empty(len(years))
winterHs95 = np.empty(len(years))
summerHs95 = np.empty(len(years))
winterHs80 = np.empty(len(years))
summerHs80 = np.empty(len(years))
for x in range(len(years)):
    t1 = datetime(years[x], 10, 1)
    t2 = datetime(years[x] + 1, 4, 30)
    tempDates = np.where((tC > t1) & (tC < t2))
    winterHs[x] = np.nanmax(hsCombined[tempDates])
    winterHs90[x] = np.nanpercentile(hsCombined[tempDates], 90)
    winterHs95[x] = np.nanpercentile(hsCombined[tempDates], 95)
    winterHs80[x] = np.nanpercentile(hsCombined[tempDates], 80)

    winterTime.append(datetime(years[x] + 1, 2, 1))

    t3 = datetime(years[x] + 1, 5, 1)
    t4 = datetime(years[x] + 1, 9, 30)  # summer window: May through September following the winter above
    tempDates2 = np.where((tC > t3) & (tC < t4))
    summerHs[x] = np.nanmax(hsCombined[tempDates2])
    summerHs90[x] = np.nanpercentile(hsCombined[tempDates2], 90)
    summerHs95[x] = np.nanpercentile(hsCombined[tempDates2], 95)
Example #50
import datetime

datetime_obj = datetime.datetime.now()
print(datetime_obj)

'''
date Class
time Class


'''  
from datetime import datetime, date, timedelta
t1 = date(year = 8769,month = 6, day =12)
t2 = date(year = 8769,month = 6, day =12)  
t3 = t1 - t2
print(t3)

t4 = datetime(year = 8769,month = 6, day =12,hour = 7,minute = 4, second = 33)
t5 = datetime(year = 2009,month = 4, day =2,hour = 4,minute = 4, second = 44)
t6 = t4-t5
print(t6)
print(type(t3))

t7 = timedelta(days =12,hours = 7,minutes = 4, seconds = 33)
t8 = timedelta(weeks = 5, days =2,hours = 4, seconds = 44)
t9 = t7-t8
print(t9)
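
# Expected output (deterministic for the fixed values above):
# t3 prints 0:00:00 and type(t3) is <class 'datetime.timedelta'>;
# t9 prints -25 days, 3:03:49.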





Example #51
            raw = np.fromfile(file,  # assumed: the snippet starts mid-call, mirroring the JDS reader above
                              dtype='uint8',
                              count=int(2 * Nsp * (FreqPointsNum + 5)))
            raw = np.reshape(raw, [2 * (FreqPointsNum + 5), Nsp], order='F')

            # Splitting time stamps and points from data
            time_stamps = raw[0:4, :]
            dataLHP = raw[5:FreqPointsNum + 5, :]
            point = raw[FreqPointsNum + 4:FreqPointsNum + 5]
            dataRHP = raw[FreqPointsNum + 6:2 * FreqPointsNum + 6, :]
            del raw

            TimeScale = []  # New for each file
            for i in range(Nsp):
                TimeScale.append(
                    datetime(int('20' + fname[-10:-8]), int(fname[-8:-6]),
                             int(fname[-6:-4]), int(time_stamps[0, i]),
                             int(time_stamps[1, i]), int(time_stamps[2, i]),
                             int(time_stamps[3, i]) * 1000))

            # *** Time resolution in ms ***
            TimeRes_dt = TimeScale[1] - TimeScale[0]
            TimeRes = float(str(TimeRes_dt)[5:11])

            TimeFigureScale = []  # Timeline (new) for each figure (Nsp)
            for i in range(Nsp):
                TimeFigureScale.append(str((TimeScale[i] - TimeScale[0])))

            TimeScaleStr = ['' for x in range(Nsp)]
            for i in range(Nsp):
                TimeScaleStr[i] = str(TimeScale[i])
            del TimeScale
Example #52
def datevec2datetime(d_vec):
    '''
    Returns datetime list from a datevec matrix
    d_vec = [[y1 m1 d1 H1 M1],[y2 m2 d2 H2 M2],...]
    '''
    return [datetime(d[0], d[1], d[2], d[3], d[4]) for d in d_vec]
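
# Usage sketch (made-up sample rows; assumes datetime has been imported):
# datevec2datetime([[2019, 1, 1, 0, 0], [2019, 1, 1, 6, 30]])
# returns [datetime(2019, 1, 1, 0, 0), datetime(2019, 1, 1, 6, 30)]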
Example #53
 timesWon = 0
 timesLoss = 0
 #find entry pattern:
 #checkStock["52weeksHigh"] = pd.rolling_max(checkStock.High, window=252, min_periods=1)
 checkStock["52weeksHigh"] = checkStock.High.rolling(window=126,
                                                     min_periods=1,
                                                     center=False).max()
 # checkStock["48weeksHigh"] = checkStock.High.rolling(window=232, min_periods=1, center=False).max()
 checkStock["averageVol"] = checkStock.Volume.rolling(window=10,
                                                      min_periods=1,
                                                      center=False).min()
 if len(checkStock) > 252:
     lastHigh = 0
     buyPrice = 0
     sellPrice = 0
     buyDate = datetime(2000, 1, 1)
     sellDate = datetime(2000, 1, 1)
     newOrder = pd.DataFrame(columns=[
         'stock', 'buyDate', 'buyPrice', 'sellDate', 'sellPrice', 'status'
     ])
     i = 252
     while i < len(checkStock) - 10:
         if openingOrder == 0:
             if checkStock.ix[i]['Close'] >= checkStock.ix[i]['Open'] and (
                     checkStock.ix[i]['High'] >=
                     checkStock.ix[i - 1]['52weeksHigh'] *
                     1.03) and checkStock.ix[i - 1]['averageVol'] > 30000:
                 lastHigh = checkStock.ix[i - 1]['52weeksHigh']
                 buyPrice = checkStock.ix[i]['Close']
                 buyDate = checkStock.index[i]
                 total_orders += 1
Example #54
import json
from datetime import datetime, timedelta

from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.task_group import TaskGroup

default_args = {
    'owner': 'astronomer',
    'depends_on_past': False,
    'start_date': datetime(2020, 12, 23),
    'email': ['*****@*****.**'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5)
}

dag = DAG(
    'elt_dag',
    default_args=default_args,
    description=
    'A mock ELT pipeline that mocks implementation for singer taps, targets, and dbt.',
    schedule_interval=timedelta(days=1),
)

with dag:
    run_jobs = TaskGroup("dbt_run")
    test_jobs = TaskGroup('dbt_test')
Example #55
    def test_decayingWindows(self):
        # Muthu's function to test DecayingWindows module

        associationQuery = \
            """
            select
                clinical_item_id, subsequent_item_id,
                patient_count_0, patient_count_3600, patient_count_86400, patient_count_604800,
                patient_count_2592000, patient_count_7776000, patient_count_31536000,
                patient_count_any
            from
                clinical_item_association
            where
                clinical_item_id < 0
            order by
                clinical_item_id, subsequent_item_id
            """

        decayAnalysisOptions = DecayAnalysisOptions()
        decayAnalysisOptions.startD = datetime(2000, 1, 9)
        decayAnalysisOptions.endD = datetime(2000, 2, 11)
        decayAnalysisOptions.windowLength = 10
        decayAnalysisOptions.decay = 0.9
        decayAnalysisOptions.delta = timedelta(weeks=4)
        decayAnalysisOptions.patientIds = [-22222, -33333]

        self.decayAnalyzer.decayAnalyzePatientItems(decayAnalysisOptions)

        expectedAssociationStats = \
            [
                [-11,-11,   1.9, 1.9, 1.9, 1.9, 1.9, 0, 0, 1.9],    # Note that the decaying windows approach will not try to update counts for time periods longer than the delta period
                [-11, -9,   0.0, 0.0, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [-11, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # 8 is not in the same delta as the other items, so the co-occurrence is not counted. Consider a future upgrade: don't train on all time at once, but on two deltas at a time with a sliding/shifting window so the overlap ranges are caught. The problem here is that the buffer-based algorithm won't record analyze_dates as it goes, so it would end up with duplicate counts of items each month?
                [-11, -6,   0.9, 0.9, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [ -9,-11,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -9, -9,   0.9, 0.9, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [ -9, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -9, -6,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -8,-11,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # 8 not in same delta as other items, so co-occurrence not counted.
                [ -8, -9,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # 8 not in same delta as other items, so co-occurrence not counted.
                [ -8, -8,   0.9, 0.9, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [ -8, -6,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # 8 not in same delta as other items, so co-occurrence not counted.
                [ -6,-11,   0.9, 0.9, 0.9, 1.9, 1.9, 0, 0, 1.9],
                [ -6, -9,   0.0, 0.0, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [ -6, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # 8 not in same delta as other items, so co-occurrence not counted.
                [ -6, -6,   1.9, 1.9, 1.9, 1.9, 1.9, 0, 0, 1.9],
            ]

        associationStats = DBUtil.execute(associationQuery)
        self.assertEqualTable(expectedAssociationStats,
                              associationStats,
                              precision=3)

        #DBUtil.execute("delete from clinical_item_association")

        # Add another training period; the older data should then pick up a second decay multiplier?
        # Odd in that we incrementally build on prior data that is being decayed, even though the new training data actually occurred before the chronological time of the existing data
        decayAnalysisOptions = DecayAnalysisOptions()
        decayAnalysisOptions.startD = datetime(2000, 1, 1)
        decayAnalysisOptions.endD = datetime(2000, 2, 12)
        decayAnalysisOptions.windowLength = 10
        decayAnalysisOptions.decay = 0.9
        decayAnalysisOptions.delta = timedelta(weeks=4)
        decayAnalysisOptions.patientIds = [-22222, -33333]

        self.decayAnalyzer.decayAnalyzePatientItems(decayAnalysisOptions)

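        # Expected values below (again an inference from the data): items already
        # analyzed in the previous run are not re-counted, but their counts appear to be
        # decayed once per delta window processed in this run (two windows between
        # 2000-1-1 and 2000-2-12), so 1.9 * 0.9^2 ~= 1.539 and 0.9 * 0.9^2 = 0.729.
        # Item -7 (2000-1-5) now falls inside the date range and shows up with a single
        # decayed count of 0.9.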
        expectedAssociationStats = \
            [
                [-11, -11, 1.539, 1.539, 1.539, 1.539, 1.539, 0.0, 0.0, 1.539],
                [-11, -9, 0.0, 0.0, 0.729, 0.729, 0.729, 0.0, 0.0, 0.729],
                [-11, -8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-11, -7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-11, -6, 0.729, 0.729, 0.729, 0.729, 0.729, 0.0, 0.0, 0.729],
                [-9, -11, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-9, -9, 0.729, 0.729, 0.729, 0.729, 0.729, 0.0, 0.0, 0.729],
                [-9, -8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-9, -7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-9, -6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-8, -11, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-8, -9, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-8, -8, 0.729, 0.729, 0.729, 0.729, 0.729, 0.0, 0.0, 0.729],
                [-8, -6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-7, -11, 0.0, 0.0, 0.0, 0.9, 0.9, 0.0, 0.0, 0.9],
                [-7, -9, 0.0, 0.0, 0.0, 0.9, 0.9, 0.0, 0.0, 0.9],
                [-7, -7, 0.9, 0.9, 0.9, 0.9, 0.9, 0.0, 0.0, 0.9],
                [-7, -6, 0.0, 0.0, 0.0, 0.9, 0.9, 0.0, 0.0, 0.9],
                [-6, -11, 0.729, 0.729, 0.729, 1.539, 1.539, 0.0, 0.0, 1.539],
                [-6, -9, 0.0, 0.0, 0.729, 0.729, 0.729, 0.0, 0.0, 0.729],
                [-6, -8, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-6, -7, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [-6, -6, 1.539, 1.539, 1.539, 1.539, 1.539, 0.0, 0.0, 1.539],
            ]

        associationStats = DBUtil.execute(associationQuery)
        #for row in expectedAssociationStats:
        #    print >> sys.stderr, row;
        #print >> sys.stderr, "============"
        #for row in associationStats:
        #    print >> sys.stderr, row;
        #print >> sys.stderr, "============"
        self.assertEqualTable(expectedAssociationStats,
                              associationStats,
                              precision=3)
Example #56
0
#!/usr/bin/python
# -*- coding: UTF-8 -*-

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.selector import Selector
import os

# generate the url_list
# for cqrb the first available day is 2007,12,16
from dateutil.rrule import *
from datetime import *
date_list = list(rrule(DAILY, dtstart=datetime(2010,11,26), \
            until=datetime.today()))
date_list = [str(date) for date in date_list]
url_list = []
for date in date_list:
    year = date[0:4]
    month = date[5:7]
    day = date[8:10]
    url = "http://zqb.cyol.com/html/" + year + "-" + month \
        + "/" + day + "/nbs.D110000zgqnb_01.htm"
    url_list.append(url)
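# e.g. for the first date this yields:
# http://zqb.cyol.com/html/2010-11/26/nbs.D110000zgqnb_01.htm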

# Define the site we are going to crawl
site_name = "zqb"

# Define where we put the files
base_dir = '/data/site_data/'
data_dir = base_dir + site_name
Example #57
0
    def test_decayingWindowsFromBuffer(self):

        associationQuery = \
            """
            select
                clinical_item_id, subsequent_item_id,
                count_0, count_3600, count_86400, count_604800,
                count_2592000, count_7776000, count_31536000,
                count_any
            from
                clinical_item_association
            where
                clinical_item_id < 0
            order by
                clinical_item_id, subsequent_item_id
            """

        decayAnalysisOptions = DecayAnalysisOptions()
        decayAnalysisOptions.startD = datetime(2000, 1, 9)
        decayAnalysisOptions.endD = datetime(2000, 2, 11)
        #decayAnalysisOptions.windowLength = 10
        decayAnalysisOptions.decay = 0.9
        decayAnalysisOptions.delta = timedelta(weeks=4)
        decayAnalysisOptions.patientIds = [-22222, -33333]
        decayAnalysisOptions.outputFile = TEMP_FILENAME
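        # windowLength is deliberately left unset here; together with outputFile this
        # presumably exercises the buffer-based training path that the test name
        # refers to (TEMP_FILENAME is defined elsewhere in this test module).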

        self.decayAnalyzer.decayAnalyzePatientItems(decayAnalysisOptions)

        expectedAssociationStats = \
            [
                [-11,-11,   1.9, 1.9, 1.9, 1.9, 1.9, 0, 0, 1.9],    # Note that the decaying-windows approach will not try to update counts for time periods longer than the delta period
                [-11, -9,   0.0, 0.0, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [-11, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # -8 is not in the same delta as the other items, so the co-occurrence does not get counted. Consider a future upgrade: don't train on all time at once, but on two deltas at a time with a sliding / shifting window to catch the overlap ranges
                [-11, -6,   0.9, 0.9, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [ -9,-11,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -9, -9,   0.9, 0.9, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [ -9, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -9, -6,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -8,-11,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # -8 is not in the same delta as the other items, so the co-occurrence does not get counted.
                [ -8, -9,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # -8 is not in the same delta as the other items, so the co-occurrence does not get counted.
                [ -8, -8,   0.9, 0.9, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [ -8, -6,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # -8 is not in the same delta as the other items, so the co-occurrence does not get counted.
                [ -6,-11,   0.9, 0.9, 0.9, 1.9, 1.9, 0, 0, 1.9],
                [ -6, -9,   0.0, 0.0, 0.9, 0.9, 0.9, 0, 0, 0.9],
                [ -6, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],     # -8 is not in the same delta as the other items, so the co-occurrence does not get counted.
                [ -6, -6,   1.9, 1.9, 1.9, 1.9, 1.9, 0, 0, 1.9],
            ]

        associationStats = DBUtil.execute(associationQuery)
        #for row in expectedAssociationStats:
        #    print >> sys.stderr, row;
        #print >> sys.stderr, "============"
        #for row in associationStats:
        #    print >> sys.stderr, row;
        #print >> sys.stderr, "============"
        self.assertEqualTable(expectedAssociationStats,
                              associationStats,
                              precision=3)

        expectedItemBaseCountById = \
            {
                -1: 0,
                -2: 0,
                -3: 0,
                -4: 0,
                -5: 0,
                -6: 1.9,
                -7: 0,
                -8: 0.9,
                -9: 0.9,
                -10: 0,
                -11: 1.9,
                -12: 0,
                -13: 0,
                -14: 0,
                -15: 0,
                -16: 0,
            }
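        # These base counts are consistent with decayed per-item occurrence counts over
        # the analyzed range (an inference from the setUp data): -6 and -11 occur once
        # in each of the two delta windows (1 + 0.9 = 1.9), -8 and -9 occur only in the
        # earlier window (0.9), and -7 (2000-1-5) falls before startD, so it stays at 0.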
        itemBaseCountById = self.dataManager.loadClinicalItemBaseCountByItemId(
        )
        #print >> sys.stderr, itemBaseCountById;
        self.assertEqualDict(expectedItemBaseCountById, itemBaseCountById)

        ######## Reset the model data and rerun with different decay parameters
        self.dataManager.resetAssociationModel()

        decayAnalysisOptions = DecayAnalysisOptions()
        decayAnalysisOptions.startD = datetime(2000, 1, 9)
        decayAnalysisOptions.endD = datetime(2000, 2, 11)
        decayAnalysisOptions.windowLength = 4
        # Just specify the window length; the decay parameter should then be calculated automatically
        #decayAnalysisOptions.decay = 0.9
        decayAnalysisOptions.delta = timedelta(weeks=4)
        decayAnalysisOptions.patientIds = [-22222, -33333]
        decayAnalysisOptions.outputFile = TEMP_FILENAME
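        # The expected values below suggest (though the exact formula is not shown in
        # this snippet) that the calculated decay is roughly 1 - 1/windowLength:
        # with windowLength = 4 that gives 0.75, matching the 0.75 and 1 + 0.75 = 1.75
        # entries, just as the explicit decay = 0.9 above matched windowLength = 10.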

        self.decayAnalyzer.decayAnalyzePatientItems(decayAnalysisOptions)

        expectedAssociationStats = \
            [
                [-11,-11,   1.75, 1.75, 1.75, 1.75, 1.75, 0, 0, 1.75],
                [-11, -9,   0.0, 0.0, 0.75, 0.75, 0.75, 0, 0, 0.75],
                [-11, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [-11, -6,   0.75, 0.75, 0.75, 0.75, 0.75, 0, 0, 0.75],
                [ -9,-11,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -9, -9,   0.75, 0.75, 0.75, 0.75, 0.75, 0, 0, 0.75],
                [ -9, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -9, -6,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -8,-11,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -8, -9,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -8, -8,   0.75, 0.75, 0.75, 0.75, 0.75, 0, 0, 0.75],
                [ -8, -6,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -6,-11,   0.75, 0.75, 0.75, 1.75, 1.75, 0, 0, 1.75],
                [ -6, -9,   0.0, 0.0, 0.75, 0.75, 0.75, 0, 0, 0.75],
                [ -6, -8,   0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0.0],
                [ -6, -6,   1.75, 1.75, 1.75, 1.75, 1.75, 0, 0, 1.75],
            ]

        associationStats = DBUtil.execute(associationQuery)
        #for row in expectedAssociationStats:
        #    print >> sys.stderr, row;
        #print >> sys.stderr, "============"
        #for row in associationStats:
        #    print >> sys.stderr, row;
        #print >> sys.stderr, "============"
        self.assertEqualTable(expectedAssociationStats,
                              associationStats,
                              precision=3)

        expectedItemBaseCountById = \
            {
                -1: 0,
                -2: 0,
                -3: 0,
                -4: 0,
                -5: 0,
                -6: 1.75,
                -7: 0,
                -8: 0.75,
                -9: 0.75,
                -10: 0,
                -11: 1.75,
                -12: 0,
                -13: 0,
                -14: 0,
                -15: 0,
                -16: 0,
            }
        itemBaseCountById = self.dataManager.loadClinicalItemBaseCountByItemId(
            acceptCache=False)
        # Don't use the cache, otherwise we would get results from the prior run
        #print >> sys.stderr, itemBaseCountById;
        self.assertEqualDict(expectedItemBaseCountById, itemBaseCountById)
Example #58
0
'''
datetime module:
Three important functions
1. datetime
2. date
3. time
'''

from datetime import *

#1 datetime
y = datetime(2020, 3, 17, 8, 23, 12)  #order y-m-date-hour-min-sec

#further options
print(y)
print(y.time())
print(y.date())
print(y.hour)
print(y.minute)

print("")
#Replacing:
print(y.replace(year=2021))

print("")
#Calling today
print(y.today())

print(1 * '\n')

#2 for only date
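# (The snippet above is cut off at this point; what follows is a minimal sketch,
# an assumption of what the remaining numbered sections would presumably show,
# using the date and time classes named in the docstring above.)
d = date(2020, 3, 17)           # year, month, day only
print(d.year, d.month, d.day)

#3 for only time
t = time(8, 23, 12)             # hour, minute, second only
print(t.hour, t.minute, t.second)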
Example #59
0
from datetime import *
from time import *

print(datetime(2021, 6, 2).strftime("%m/%d/%y"))
Example #60
0
    def setUp(self):
        """Prepare state for test cases"""
        DBTestCase.setUp(self)

        log.info("Populate the database with test data")
        from stride.clinical_item.ClinicalItemDataLoader import ClinicalItemDataLoader
        ClinicalItemDataLoader.build_clinical_item_psql_schemata()

        self.clinicalItemCategoryIdStrList = list()
        headers = ["clinical_item_category_id", "source_table"]
        dataModels = \
            [
                RowItemModel( [-1, "Labs"], headers ),
                RowItemModel( [-2, "Imaging"], headers ),
                RowItemModel( [-3, "Meds"], headers ),
                RowItemModel( [-4, "Nursing"], headers ),
                RowItemModel( [-5, "Problems"], headers ),
                RowItemModel( [-6, "Lab Results"], headers ),
            ]
        for dataModel in dataModels:
            (dataItemId,
             isNew) = DBUtil.findOrInsertItem("clinical_item_category",
                                              dataModel)
            self.clinicalItemCategoryIdStrList.append(str(dataItemId))

        headers = [
            "clinical_item_id", "clinical_item_category_id", "name",
            "analysis_status"
        ]
        dataModels = \
            [
                RowItemModel( [-1, -1, "CBC",1], headers ),
                RowItemModel( [-2, -1, "BMP",0], headers ), # Clear analysis status, so this will be ignored unless changed
                RowItemModel( [-3, -1, "Hepatic Panel",1], headers ),
                RowItemModel( [-4, -1, "Cardiac Enzymes",1], headers ),
                RowItemModel( [-5, -2, "CXR",1], headers ),
                RowItemModel( [-6, -2, "RUQ Ultrasound",1], headers ),
                RowItemModel( [-7, -2, "CT Abdomen/Pelvis",1], headers ),
                RowItemModel( [-8, -2, "CT PE Thorax",1], headers ),
                RowItemModel( [-9, -3, "Acetaminophen",1], headers ),
                RowItemModel( [-10, -3, "Carvedilol",1], headers ),
                RowItemModel( [-11, -3, "Enoxaparin",1], headers ),
                RowItemModel( [-12, -3, "Warfarin",1], headers ),
                RowItemModel( [-13, -3, "Ceftriaxone",1], headers ),
                RowItemModel( [-14, -4, "Foley Catheter",1], headers ),
                RowItemModel( [-15, -4, "Strict I&O",1], headers ),
                RowItemModel( [-16, -4, "Fall Precautions",1], headers ),
            ]
        for dataModel in dataModels:
            (dataItemId,
             isNew) = DBUtil.findOrInsertItem("clinical_item", dataModel)

        headers = [
            "patient_item_id", "encounter_id", "patient_id",
            "clinical_item_id", "item_date"
        ]
        dataModels = \
            [
                RowItemModel( [-1,  -111,   -11111, -4,  datetime(2000, 1, 1, 0)], headers ),
                RowItemModel( [-2,  -111,   -11111, -10, datetime(2000, 1, 1, 0)], headers ),
                RowItemModel( [-3,  -111,   -11111, -8,  datetime(2000, 1, 1, 2)], headers ),
                RowItemModel( [-4,  -112,   -11111, -10, datetime(2000, 1, 2, 0)], headers ),
                RowItemModel( [-5,  -112,   -11111, -12, datetime(2000, 2, 1, 0)], headers ),
                RowItemModel( [-10, -222,   -22222, -7,  datetime(2000, 1, 5, 0)], headers ),
                RowItemModel( [-12, -222,   -22222, -6,  datetime(2000, 1, 9, 0)], headers ),
                RowItemModel( [-13, -222,   -22222, -11, datetime(2000, 1, 9, 0)], headers ),
                RowItemModel( [-95, -222,   -22222, -9,  datetime(2000, 1,10, 0)], headers ),
                RowItemModel( [-94, -333,   -33333, -8,  datetime(2000, 1,10, 0)], headers ),    # In first window delta unit only
                RowItemModel( [-14, -333,   -33333, -6,  datetime(2000, 2, 9, 0)], headers ),
                RowItemModel( [-15, -333,   -33333, -2,  datetime(2000, 2,11, 0)], headers ),  # Will set clinical_item_link inheritances to this item to only record certain associations
                RowItemModel( [-16, -333,   -33333, -11, datetime(2000, 2,11, 0)], headers ),
            ]
        for dataModel in dataModels:
            (dataItemId,
             isNew) = DBUtil.findOrInsertItem("patient_item", dataModel)

        headers = ["clinical_item_id", "linked_item_id"]
        dataModels = \
            [   # No direct link from -6 to -2; instead demonstrate that the inherited relationship (via -4) will still be recognized
                RowItemModel( [-6, -4], headers ),
                RowItemModel( [-4, -2], headers ),
            ]
        for dataModel in dataModels:
            (dataItemId,
             isNew) = DBUtil.findOrInsertItem("clinical_item_link", dataModel)

        self.decayAnalyzer = DecayingWindows()  # DecayingWindows instance to test on; *** remember to change database to medinfo_copy
        self.dataManager = DataManager()