Example #1
def todolist(request, interval='all'):

    assert isinstance(request, HttpRequest)

    if request.method == 'GET' and 'search' in request.GET:
        search = request.GET['search']
        if not search:
           tasks = {}
        else:
           tasks = Task.objects.filter(Q(name__icontains=search) |
                                       Q(description__icontains=search) |
                                       Q(date__icontains=search))
        return render(request,
                      'app/todolist.html',
                      {
                          'title': 'ToDoList',
                          'year':datetime.now().year,
                          'message': "Hello",
                          'tasks': tasks,
                      })

    now = date.today()
    # NOTE: 'step' is never used below; the GET value would be a str, while the default is a timedelta
    step = request.GET.get('step', timedelta(days=1))
    tasks = False
    gap = now
    if interval == 'today':
        gap = now
    elif interval == 'tomorrow':
        now += timedelta(days=1)
        gap = now
    elif interval == 'week':
        gap = now + timedelta(days=7)
    elif interval == 'month':
        gap = now + timedelta(days=31)
    elif interval == 'year':
        gap = now + timedelta(days=365)
    else:
        tasks = Task.objects.all()

    if not tasks:
        # isoformat() yields zero-padded YYYY-MM-DD strings for the range lookup
        currentDay = now.isoformat()
        nextDay = gap.isoformat()
        tasks = Task.objects.filter(date__range=[currentDay, nextDay])

    return render(
        request,
        'app/todolist.html',
        {
            'title': 'ToDoList',
            'year':datetime.now().year,
            'message': "Hello",
            'tasks': tasks,

        }
    )
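The chain of elif branches above only decides how far past today the lookup range should extend. A minimal standalone sketch of the same behaviour using a lookup table (the names below are illustrative, not taken from the view above):

from datetime import date, timedelta

INTERVAL_SPANS = {
    'today': (0, 0),      # (start offset from today, length in days)
    'tomorrow': (1, 0),
    'week': (0, 7),
    'month': (0, 31),
    'year': (0, 365),
}

def date_range_for(interval, today=None):
    # Returns (start, end) dates for the requested interval, or None for "all".
    today = today or date.today()
    if interval not in INTERVAL_SPANS:
        return None
    offset, length = INTERVAL_SPANS[interval]
    start = today + timedelta(days=offset)
    return start, start + timedelta(days=length)

print(date_range_for('week'))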
Example #2
def DateCreator(date):
    # NOTE: the parameter shadows datetime.date; date.today() still works here
    # because .today() is a classmethod reachable from an instance.
    mydate = date.today()
    todaysName = calendar.day_name[mydate.weekday()]
    yesterday = mydate - timedelta(1)
    yesterdayName = calendar.day_name[yesterday.weekday()]

    # This portion of the code grabs the last day and prepares it for the URL.
    workingDate = date - timedelta(1)
    yesterdayYear = '20' + workingDate.strftime('%y')
    yesterdayDay = workingDate.strftime('%d')
    yesterdayMonth = workingDate.strftime('%m')
    # city and state are assumed to be defined in the enclosing scope.
    urlCreator(yesterdayDay, yesterdayMonth, yesterdayYear, city, state)
    def __init__(self, search):
        self.solr_searcher = search
        self.graphPool = dict()
        last_time_stamp = datetime.now(pytz.utc) - timedelta(minutes=.5)
        # self.time_slice = timedelta(minutes=5).total_seconds()
        self.start_app_time = last_time_stamp
        self.last_time_stamp = last_time_stamp
Example #4
def getAverages(time, key, location, typeOfLocation, longTime, weekly, filter, timeWindow, resolution, sortOperators):
    #datetime.date object
    #key: if set, db query by uid: values can be uid or 0
    #weekly: true = week, false = day
    #filter: true/false
    
    #typeOfLocation gets its values as follows:
    # 0 = db query w/o area restriction
    # 1 = db query by postal code
    # 2 = db query by city name
    # more to be added
    
    #timeWindow: how many days of data are fetched (if filter=true, this doesn't matter)
    #resolution: averages are calculated for every [resolution] hours, values 1-24
    #sortOperators: true/false, whether separation of operators is desired.
    
    #append a SQL LIKE wildcard when the location is a postal code
    if typeOfLocation == 1:
        location = location + "%"
    
    #time is altered to include the given day
    time = time + timedelta(days = 1)
    
    #Different functions are called according to the parameters
    if sortOperators:
        data = getAveragesSortOperators(time, key, location, typeOfLocation, longTime, weekly, filter, timeWindow, resolution, sortOperators)
    elif longTime:
        data = getAveragesLongTime(time, key, location, typeOfLocation, sortOperators)
    elif filter:
        data = getAveragesWeekFilter(weekly, time, key, location, typeOfLocation, timeWindow, resolution, sortOperators)
    elif weekly:
        data = calculateAveragesWeekly(time, key, location, typeOfLocation, timeWindow, resolution, sortOperators,0,0)
    else:
        data = calculateAveragesDaily(time, key, location, typeOfLocation, timeWindow, resolution, sortOperators,0,0)
    return data
def DateCreator(date,deltaDate):
    
    workingDate = date - timedelta(deltaDate)
    yesterdayYear = '20' + workingDate.strftime('%y')
    yesterdayDay = workingDate.strftime('%d')
    yesterdayMonth = workingDate.strftime('%m')
    
    return (yesterdayDay, yesterdayMonth, yesterdayYear)
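DateCreator above just shifts a date back and splits it into zero-padded day/month/year strings. A self-contained sketch of the same idea (the helper name and the caller are mine, for illustration only):

from datetime import date, timedelta

def date_parts(base_date, deltaDate):
    # Shift the date back by deltaDate days and return ('DD', 'MM', 'YYYY') strings,
    # mirroring the DateCreator function above.
    workingDate = base_date - timedelta(deltaDate)
    return (workingDate.strftime('%d'),
            workingDate.strftime('%m'),
            '20' + workingDate.strftime('%y'))

day, month, year = date_parts(date(2021, 3, 1), 1)
print(day, month, year)  # 28 02 2021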
    def _customer_cancel(self):
        # Current order state: can it be cancelled outright?
        # Check whether the booking is still inside the free-cancellation window
        if self.hotelPackageOrder.process_state == HotelPackageOrder.CUSTOMER_REQUIRE:  # seller has not accepted yet: refund in full
            self.hotelPackageOrder.process_state = HotelPackageOrder.CUSTOMER_CANCEL  # mark as cancelled by the customer
            self.customer.add_customer_points(self.hotelPackageOrder.amount)
                # warn: full refund, generate the bill
        elif self.hotelPackageOrder.process_state == HotelPackageOrder.SELLER_ACCEPT:  # the seller has already accepted

            # warn: the seller has already accepted
            hotelPackageOrderItems = self.hotelPackageOrder.items.select_subclasses()
            checkin_time = self.hotelPackageOrder.checkin_time

            # 18:00 on the check-in day
            deduct_all_point_time = datetime(checkin_time.year, checkin_time.month, checkin_time.day,
                                             hour=18)  # later than this, all points are deducted (the default check-in time)

            # 14:00 on the day before check-in
            dedcut_halt_point_time = deduct_all_point_time - timedelta(hours=28)  # after 14:00 of the previous day, deduct half the points

            self.hotelPackageOrder.process_state = HotelPackageOrder.CUSTOMER_BACK

            if self.cur_datetime < dedcut_halt_point_time:  # the point-deduction window has not started yet
                self.hotelPackageOrder.process_state = HotelPackageOrder.CUSTOMER_CANCEL
                self.customer.add_customer_points(self.hotelPackageOrder.amount)
                # warn: likewise treated as a customer cancellation
            else:

                now = datetime.now()
                if self.cur_datetime < deduct_all_point_time:  # deduct half
                    need_deduct_poins = hotelPackageOrderItems[0].point * 0.5

                    if len(hotelPackageOrderItems) > 1 and now.hour >= 14:
                        need_deduct_poins += hotelPackageOrderItems[1].point * 0.5

                    need_back_to_customer_point = int(self.hotelPackageOrder.amount - need_deduct_poins)
                    orderBill = OrderBill.create_for_roomOrder_cancel(roomOrder=self.hotelPackageOrder,
                                                                      refund_amount=need_back_to_customer_point)
                    self.orderBill = orderBill
                else:  # deduct the full first-day points; 75% goes to the agent
                    need_deduct_poins = hotelPackageOrderItems[0].point

                    if len(hotelPackageOrderItems) > 1 and now.hour >= 14:
                        need_deduct_poins += hotelPackageOrderItems[1].point * 0.5
                    need_back_to_customer_point = int(self.hotelPackageOrder.amount - need_deduct_poins)

                    orderBill = OrderBill.create_for_roomOrder_cancel(roomOrder=self.hotelPackageOrder,
                                                                      refund_amount=need_back_to_customer_point)

                    self.orderBill = orderBill
                self.hotelPackageOrder.success = True
                self.hotelPackageOrder.settled = True
        # TODO: if ...
        self.hotelPackageOrder.closed = True
        self.hotelPackageOrder.success = True
        self.hotelPackageOrder.settled = True
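The two cut-off times above boil down to: deduct everything after 18:00 on the check-in day, and deduct half after 14:00 of the previous day (18:00 minus 28 hours). A standalone sketch of just that arithmetic, with a hypothetical check-in date:

from datetime import datetime, timedelta

checkin = datetime(2021, 6, 10)                             # hypothetical check-in day
deduct_all_after = checkin.replace(hour=18)                 # 18:00 on the check-in day
deduct_half_after = deduct_all_after - timedelta(hours=28)  # 14:00 on the day before

print(deduct_all_after)   # 2021-06-10 18:00:00
print(deduct_half_after)  # 2021-06-09 14:00:00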
Example #7
def main():
    # CurrentDate() is assumed to populate the globals holdTemp and holdDegreeDays printed below.
    CurrentDate()
    print(holdTemp)
    print(holdDegreeDays)
    mydate = date.today()
    test33 = mydate - timedelta(1)
    capture = calendar.day_name[mydate.weekday()]  
    capture2 = calendar.day_name[test33.weekday()]
    print(capture)
    print(capture2)
    def _is_freetime_for_cancel(self):
        if self.hotelPackageOrder.process_state == HotelPackageOrder.CUSTOMER_REQUIRE:  # the seller has not accepted yet
            return True
        max_later_hours = app_settings.hotelOrder_free_cancel_hours
        checkin_time = self.hotelPackageOrder.checkin_time
        checkin_time = datetime(checkin_time.year, checkin_time.month, checkin_time.day, hour=14)
        delay_date = checkin_time - timedelta(hours=max_later_hours)
        if self.cur_datetime < delay_date:  # still before the point-deduction deadline
            return True
        else:
            return False
def convfdat(a):
    """
    Convert a date stored as an Excel serial number into a datetime usable by Django.

    :param a: the date expressed as a float (Excel serial number)
    :type a: float

    :example:
    >>> convfdat(45135.45987)
    Converts the serial 45135.45987 into the datetime 2023-07-28 11:02:13.
    """

    # Excel's day 1 is 1900-01-01 (and Excel treats 1900 as a leap year, hence the -2).
    exord = datetime.toordinal(date(1900, 1, 1)) - 2
    d = datetime.fromordinal(floor(a) + exord)
    sec = round(((a - floor(a)) * 10**9) / 11574)  # fractional day -> seconds (approx. * 86400)
    d4 = d + timedelta(seconds=sec)
    return d4
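A quick standalone check of the conversion above. The helper name is mine; the arithmetic mirrors convfdat but uses the exact factor of 86400 seconds per day:

from datetime import date, datetime, timedelta
from math import floor

def excel_serial_to_datetime(a):
    # Excel's day 1 is 1900-01-01 and Excel wrongly treats 1900 as a leap year,
    # hence the -2 correction on the ordinal (same idea as convfdat above).
    base = date(1900, 1, 1).toordinal() - 2
    d = datetime.fromordinal(floor(a) + base)
    return d + timedelta(seconds=round((a - floor(a)) * 86400))

print(excel_serial_to_datetime(45135.45987))  # 2023-07-28 11:02:13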
def generateChallenge(username, entries=Global.DEFAULT_KEY_NUMBER, output_folder="users", expiration_date=None):

    if expiration_date is None:
        expiration_date = datetime.now() + timedelta(days=Global.EXPIRATION_DAYS)
    alphabet = string.ascii_letters


    keys = [generateKeyword(alphabet, Global.KEY_LENGHT) for k in range(entries)]
    challenges = [generateKeyword(alphabet, Global.CHALLENGE_LENGHT) for k in range(entries)]

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    with open(output_folder+"/" + username + ".txt", encoding="UTF-8", mode="w") as file:
        file.write(expiration_date.strftime(Global.DATE_FORMAT) + "\n")

        file.write("\n")

        for (key, challenge) in zip(keys, challenges):
            file.write(key + Global.USER_FILE_DELIMITER + challenge + "\n")
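The file written above has a simple layout: expiration date on the first line, a blank separator, then one key/challenge pair per line. A hedged sketch of reading it back; the date format and delimiter below are hypothetical stand-ins for Global.DATE_FORMAT and Global.USER_FILE_DELIMITER:

from datetime import datetime

DATE_FORMAT = "%Y-%m-%d %H:%M:%S"  # assumption, not the real Global.DATE_FORMAT
DELIMITER = ";"                    # assumption, not the real Global.USER_FILE_DELIMITER

def load_challenge(path):
    # Returns (expiration_datetime, [(key, challenge), ...]) for a file written
    # in the layout used above: date line, blank line, then key/challenge pairs.
    with open(path, encoding="UTF-8") as fh:
        expiration = datetime.strptime(fh.readline().strip(), DATE_FORMAT)
        fh.readline()  # skip the blank separator line
        pairs = [tuple(line.strip().split(DELIMITER, 1)) for line in fh if line.strip()]
    return expiration, pairs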
    def propStartDiff(self, daysOffset=7):
        """
        Returns the difference in seconds between the start time of this event
        and the same event propagated daysOffset days later.

        How do we know it is the same event?
        Matching by approximate date is the best we can do...
        """

        md = self._propagation._datetime + timedelta(days=daysOffset)

        # look up the propagations executed daysOffset days later
        offsetPropagations = Propagation.objects.filter(_datetime__year=md.year,
                                                        _datetime__month=md.month,
                                                        _datetime__day=md.day)

        # among the propagations run daysOffset days later, keep the one that
        # corresponds to this same event
        events = offsetPropagations.Event_set

        for e in events:
            print(e)
import time
from datetime import datetime, timedelta


print("changes test made for git")

print(time.time())
print(time.localtime())
t = datetime.now()
print(t)
print("%s/%s/%s" % (t.year, t.day, t.month))
print("%s/%s/%s,%s:%s:%s" % (t.day, t.month, t.year, t.hour, t.minute, t.second))

date = datetime.now()
print(date)
print(date + timedelta(10))
Example #13
def get_zhiji(h=1):
    """以当前之间之前的时间作为值机日期获取值机日期"""
    next_time = (datetime.now() - timedelta(hours=h)).strftime("%Y%m%d%H%M%S")
    return str(next_time)
Example #14
def load_data(file, version):

    if (version == 15):
        #load csv
        data = pd.read_csv(file + '.csv',
                           sep=',',
                           header=0,
                           usecols=[0, 1, 5, 6],
                           dtype={
                               0: np.int32,
                               1: np.int32,
                               5: str,
                               6: np.int32
                           })
        #specify header names
        data.columns = ['UserId', 'ItemId', 'TimeStr', 'ActionType']
        buy_key = 2

    elif (version == 13):
        #load csv( user_id,brand_id,action_type,action_time )
        data = pd.read_csv(file + '.csv',
                           sep=',',
                           header=0,
                           usecols=[0, 1, 2, 3],
                           dtype={
                               0: np.int32,
                               1: np.int32,
                               3: str,
                               2: np.int32
                           })
        #specify header names
        data.columns = ['UserId', 'ItemId', 'ActionType', 'TimeStr']
        buy_key = 1

    data = data[data.ActionType.isin([0, buy_key])]  #click+buy

    #convert time string to timestamp and remove the original column
    data['SessionId'] = data.groupby([data.UserId,
                                      data.TimeStr]).grouper.group_info[0]
    data['ActionNum'] = data.groupby([data.UserId, data.TimeStr]).cumcount()

    if (version == 15):
        data['Time'] = data.apply(lambda x: (datetime.strptime(
            x['TimeStr'] + '-2015 00:00:00.000', '%m%d-%Y %H:%M:%S.%f'
        ) + timedelta(seconds=x['ActionNum'])).timestamp(),
                                  axis=1)
    elif (version == 13):
        data['Time'] = data.apply(lambda x: (datetime.strptime(
            x['TimeStr'] + '-2015 00:00:00.000', '%m-%d-%Y %H:%M:%S.%f'
        ) + timedelta(seconds=x['ActionNum'])).timestamp(),
                                  axis=1)

    del (data['ActionNum'])
    del (data['TimeStr'])

    data.sort_values(['SessionId', 'Time'], inplace=True)

    #output
    data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
    data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)

    buys = data[data.ActionType == buy_key]
    data = data[data.ActionType == 0]

    del (data['ActionType'])

    print(
        'Loaded data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'
        .format(len(data), data.SessionId.nunique(), data.ItemId.nunique(),
                data_start.date().isoformat(),
                data_end.date().isoformat()))

    return data, buys
Example #15
now_second = now.timestamp()
print('seconds since 1970-01-01 00:00:00 =', now_second)

##### timestamp to datetime
#### convert to a datetime in the local time zone
now2 = datetime.fromtimestamp(now_second)
print('now datetime =', now2)
#### convert to a datetime in the UTC time zone
utc_now2 = datetime.utcfromtimestamp(now_second)
print('now utc datetime =', utc_now2)

##### str to datetime (and back to str)
dt = datetime.strptime('2015-08-09 10:11:12', '%Y-%m-%d %H:%M:%S')
print('Certain datetime =', dt)
str_dt = dt.strftime('%Y-%m-%d %H:%M:%S')
print('Certain datetime str =', str_dt)
##### datetime arithmetic
dt = dt - timedelta(days=2, hours=12)
print('Certain datetime =', dt)

##### time zone conversion
# first attach tzinfo to get an aware datetime
utc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)
print('UTC datetime =', utc_dt)

# then convert to the target time zone's datetime
bj_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))
print('Beijing datetime =', bj_dt)
tokyo_dt = bj_dt.astimezone(timezone(timedelta(hours=9)))
print('Tokyo datetime =', tokyo_dt)
print('END')
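The fixed-offset timezone(timedelta(hours=8)) approach above works, but a sketch using the standard-library zoneinfo module (Python 3.9+) gives named zones with DST handling; the zone names below are IANA identifiers:

from datetime import datetime, timezone
from zoneinfo import ZoneInfo

utc_now = datetime.now(timezone.utc)
print('UTC datetime =', utc_now)
print('Beijing datetime =', utc_now.astimezone(ZoneInfo('Asia/Shanghai')))
print('Tokyo datetime =', utc_now.astimezone(ZoneInfo('Asia/Tokyo')))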
Example #16
def test_new_passpoints(request):
    is_invalid = 'is-invalid'
    active_user = PasswordUser.objects.get(username = request.user)
    login_info = LoginInfo.objects.filter(uid=active_user).order_by('login_period')
    test_period = login_info.count() + 1
    oldpps = NewPpInfo.objects.filter(uid=active_user).order_by('pk')
    oldpp_len = len(oldpps)
    newpps = oldpps.last()
    img_count = newpps.img_number
    img_fin = request.POST.get('img_fin')
    if img_fin is None:
        img_fin = 1
    else:
        img_fin = int(img_fin)
    if img_fin <= img_count:
        img_key = NewPpInfo.objects.filter(uid=active_user, img_number=img_fin).first()
        img_key = img_key.cp_id.iid.pk
    img = Image.objects.get(pk=img_key)  # assumes img_fin never exceeds img_count here
    images = Image.objects.all().order_by('?')[:8]
    confirm_info = ConfirmInfo.objects.filter(uid=active_user).last()
    confirm_date = confirm_info.date_confirmed
    login_period = login_info.count()
    test_period = login_period + 1
    if LoginInfo.objects.filter(uid=active_user).exists():
        last_login = login_info.latest('date_login')
        last_login = last_login.date_login
    today = timezone.now()
    if login_period == 0:
        d = 1
    elif login_period == 1:
        d = 3
    elif login_period == 2:
        d = 7
    else:
        try:
            sus_check = SusResult.objects.get(uid=active_user)
        except SusResult.DoesNotExist:
            sus_check = None
        if sus_check:
            return render(request, 'gp/complete.html', {})
        else:
            return render(request, 'gp/login_success.html', {'test_period': test_period})
    test_date = confirm_date + timedelta(days=d)
    if today >= test_date:
        if request.method == 'POST':
            time_prev = request.POST.get('time_prev')
            if time_prev == '':
                time_prev = 0
            else:
                time_prev = int(time_prev)
            time_create = int(request.POST.get('time'))
            time_create = time_create + time_prev
            m = math.floor(time_create / 60)
            s = time_create % 60
            time = datetime.time(0,m,s)  # assumes the datetime *module* is imported, not the datetime class
            key = request.POST.get('image')
            image = Image.objects.get(pk=key)
            if image == img:
                img_url = image.image.url
                return render(request, 'gp/test_newpp2.html', {'img_url': img_url, 'oldpps': oldpps, 'test_period': test_period, 'oldpp_len': oldpp_len, 'image': image, 'time_prev': time_create, 'img_fin': img_fin})
            else:
                return render(request, 'gp/test_newpp.html', {'img': img, 'images': images, 'test_period': test_period, 'is_invalid': is_invalid, 'time_prev': time_create, 'img_fin': img_fin, 'image': image})
        return render(request, 'gp/test_newpp.html', {'img': img, 'images': images, 'test_period': test_period, 'img_fin': img_fin})
    else:
        return render(request, 'gp/login_wait.html', {'test_date': test_date})
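The login_period to waiting-days schedule above (1, 3, then 7 days after confirmation) can be checked in isolation; a small sketch with illustrative values:

from datetime import datetime, timedelta

WAIT_DAYS = {0: 1, 1: 3, 2: 7}   # login_period -> days to wait after confirmation

def next_test_date(confirm_date, login_period):
    # Mirrors the branching above; periods >= 3 have no further scheduled test.
    d = WAIT_DAYS.get(login_period)
    return None if d is None else confirm_date + timedelta(days=d)

print(next_test_date(datetime(2021, 5, 1, 9, 0), 1))  # 2021-05-04 09:00:00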
Example #17
    HIGHER_TIER_ARBITRATOR_ID, BOSS_ID
}
can_esn = {
    UPDATER_ID, DEVELOPER_ID, LOWER_TIER_ARBITRATOR_ID,
    HIGHER_TIER_ARBITRATOR_ID, BOSS_ID
}
can_notify = {
    REPORTER_ID, UPDATER_ID, DEVELOPER_ID, LOWER_TIER_ARBITRATOR_ID,
    HIGHER_TIER_ARBITRATOR_ID, BOSS_ID
}
can_movelu = {
    REPORTER_ID, UPDATER_ID, DEVELOPER_ID, LOWER_TIER_ARBITRATOR_ID,
    HIGHER_TIER_ARBITRATOR_ID, BOSS_ID
}

LIST_WAIT_TIME = timedelta(seconds=20)
ESN_WAIT_TIME = timedelta(minutes=40)
PING_INTERVAL = timedelta(minutes=10)
ML_WAIT_TIME = timedelta(seconds=15)
MLLU_WAIT_TIME = timedelta(seconds=45)
MMR_LU_WAIT_TIME = timedelta(seconds=30)
quick_delete = 5
medium_delete = 5
long_delete = 30


class TierMogi(object):
    def __init__(self, channel: discord.channel.TextChannel):
        '''
        Constructor
        '''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

'Nginx log aggregation'

from datetime import datetime, timedelta
import os
import pymysql

allProductIdListFileName = "/letv/data/allProductIdList_" + (datetime.now() - timedelta(days=1)).strftime('%Y%m%d') + ".txt"
nginxMapResultFileName = "/letv/data/productIdList_" + (datetime.now() - timedelta(days=1)).strftime('%Y%m%d') + ".txt"
writeFileName = "/letv/data/allProductIdCount_" + (datetime.now() - timedelta(days=1)).strftime('%Y%m%d') + ".txt"
containerOne = "="
allProductCount = {}
dataProductList = {}
mysqlConfig = {
    "host":"10.100.100.246",
    "port":3322,
    "user":"******",
    "password":"******",
    "db":"taps",
    'charset':'utf8mb4'
}
queryProductSQL = "SELECT PRO_CODE, PRO_NAME FROM PS_PRODUCT"

serverList = ["10.100.54.57", "10.100.54.58", "10.110.122.107", "10.110.122.80", "10.130.208.25", "10.130.208.33",
              "10.181.117.41", "10.181.117.81", "10.183.222.107", "10.183.222.135"]

if __name__ == "__main__":
    # Copy the productIdList from all servers
    for server in serverList:
Example #19
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'weixin'

from datetime import datetime, timedelta
import time, timeit
dt = datetime.now()
print(dt)
print(dt.year)
print(dt.month)
print(dt.day)

# timedelta represents a time interval
t1 = datetime.now()
print(t1.strftime("%Y-%m-%d %H:%M:%S"))
td = timedelta(hours=22)
print((t1 + td).strftime("%Y-%m-%d %H:%M:%S"))

# # timeit: timing tests and tools
# def p():
#     time.sleep(10.6)
# t1=time.time()
# p()
# print(time.time()-t1)

# comparing two ways of building a list

# c='''
# sum=[]
# for i in range(1000):
#     sum.append(i)
# '''
    def transform(self, dataset):
        # So the first vectors are initialized correctly:
        self.last_date = dataset[0]['date'] - timedelta(days=4)
        return np.array([self.__vectorize(x) for x in dataset])
def make_projections(num, num2, num3):
    # TODO: realistic projections
    pulled_data = pull_data()
    realistic_new_data_points = num
    realistic_new_dates = []
    realistic_cases_growth_rate = []

    growth_rate_multiplier = num2

    last_gr = float(pulled_data[8][-1]) - 1
    last_gr *= growth_rate_multiplier
    last_gr = round(last_gr, 4)
    realistic_cases_growth_rate.append(last_gr)
    for i in range(1, realistic_new_data_points):
        x_point = i
        y_point = float(
            realistic_cases_growth_rate[i - 1]) * growth_rate_multiplier
        y_point = round(y_point, 4)
        realistic_cases_growth_rate.append(y_point)
    # print(realistic_cases_growth_rate)
    now = datetime.now()
    for i in range(len(realistic_cases_growth_rate)):
        realistic_cases_growth_rate[i] += 1
        future_date = now + timedelta(days=i)
        future_date_str = future_date.strftime("%m/%d")
        # print(future_date_str)
        realistic_new_dates.append(future_date_str)
    # print(realistic_cases_growth_rate)
    # print(realistic_new_dates)

    # TODO: project cases based on realistic projections
    last_case_number = int(pulled_data[1][-1] * realistic_cases_growth_rate[0])
    cases_projections = [last_case_number]
    for i in range(len(realistic_cases_growth_rate) - 1):
        new_projection = cases_projections[i] * realistic_cases_growth_rate[i +
                                                                            1]
        cases_projections.append(int(new_projection))

    # TODO: calc new cases, new deaths
    new_projected_cases = [cases_projections[0] - pulled_data[1][-1]]
    for i in range(1, realistic_new_data_points):
        new_projected_cases_calc = cases_projections[i] - cases_projections[i -
                                                                            1]
        new_projected_cases.append(new_projected_cases_calc)
    # print(new_cases)
    # print(len(new_cases))

    # new_deaths = [0]
    # for i in range(1, len(dates)):
    #     new_deaths_calc = deaths[i] - deaths[i-1]
    #     new_deaths.append(new_deaths_calc)
    # print(new_deaths)
    # print(len(new_deaths))

    # TODO: extend data for graph
    for i in range(len(pulled_data[0]) - num3):
        cases_projections.insert(0, " ")
    for i in range(len(pulled_data[0]) - num3):
        realistic_cases_growth_rate.insert(0, " ")
    for i in range(len(pulled_data[0]) - num3):
        new_projected_cases.insert(0, " ")
    print("ran make_projections")
    return realistic_new_dates, cases_projections, realistic_cases_growth_rate, new_projected_cases
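The date-label part of the projection above just walks forward one day at a time from now and formats each date as "MM/DD"; a compact standalone sketch of that step:

from datetime import datetime, timedelta

now = datetime.now()
future_labels = [(now + timedelta(days=i)).strftime("%m/%d") for i in range(7)]
print(future_labels)  # e.g. ['06/01', '06/02', ...] for a run on June 1st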
Example #22
def callbackBookTE(driver):
    #     if "Create Standard Shipment for another Carrier" in driver.page_source:
    #         driver.find_element_by_id("PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_Create_Standard_Shipment").send_keys(Keys.ENTER)
    #     elif not "Either an ID or Full Shipper information is required for a Shipper." in driver.page_source:
    #         while not "Either an ID or Full Shipper information is required for a Shipper." in driver.page_source:
    #             top = Tk()
    #             L2 = Label(top, text="Please navigate to shipment page the hit \"OK\"")
    #             L2.grid(row=0, column=0)
    #
    #             def callbackStop():
    #                 top.destroy()
    #
    #             MyButton4 = Button(top, text="OK", width=10, command=callbackStop)
    #             MyButton4.grid(row=1, column=0)
    #
    #             popUp(top)
    driver.implicitly_wait(100)
    container = Container()
    setupDM(container)
    try:
        portCode = [""]
        if container.terminal == "311":
            if "PACKER" in container.extraText or "PHL" in container.extraText:
                portCode[0] = "1101"
            elif "PNCT" in container.extraText or "APM" in container.extraText or "MAHER" in container.extraText or "NYC" in container.extraText:
                portCode[0] = "4601"
            else:
                top = Tk()

                L1 = Label(top, text="Is the terminal Packer?")
                L1.grid(row=0, column=0, columnspan=2)
                L1.config(font=("Courier", 24))

                def callbackPackerYes(portCode):
                    portCode[0] = "1101"
                    top.destroy()

                def callbackPackerNo(portCode):
                    portCode[0] = "4601"
                    top.destroy()

                MyButton4 = Button(top,
                                   text="Yes",
                                   width=20,
                                   command=lambda: callbackPackerYes(portCode))
                MyButton4.grid(row=2, column=0)
                MyButton4.config(font=("Courier", 24))

                MyButton5 = Button(top,
                                   text="No",
                                   width=20,
                                   command=lambda: callbackPackerNo(portCode))
                MyButton5.grid(row=2, column=1)
                MyButton5.config(font=("Courier", 24))
                top.lift()
                top.attributes('-topmost', True)
                top.after_idle(top.attributes, '-topmost', False)

                # get screen width and height
                ws = top.winfo_screenwidth()  # width of the screen
                hs = top.winfo_screenheight()  # height of the screen

                w = 800
                h = 150

                # calculate x and y coordinates for the Tk root window
                x = (ws / 2) - (w / 2)
                y = (hs / 2) - (h / 2)

                # set the dimensions of the screen
                # and where it is placed
                top.geometry('%dx%d+%d+%d' % (w, h, x, y))
                moveTo(946, 614)
                top.mainloop()
        elif container.terminal == "309":
            portCode[0] = "1101"
        else:
            portCode[0] = "4601"

        elem = driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_SHIPMENTTYPE"
        )
        while "Either an ID or Full Shipper information is required for a Shipper." in driver.page_source:
            try:
                elem.send_keys(Keys.ENTER)
            except:
                pass
        Select(
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_SHIPMENTTYPE"
            )).select_by_visible_text("Prefiled Inbond")
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_null"
        ).send_keys(Keys.ENTER)
        elem = driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_userEnteredSCN")
        elem.clear()
        elem.send_keys("801" + str(container.PB))
        Select(
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_POINTOFLOADINGQLFR"
            )).select_by_visible_text("Schedule K")
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_POINTOFLOADING"
        ).send_keys("80107")
        Select(
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_FDACONFIRMATIONIND"
            )).select_by_visible_text("No")

        try:
            driver.find_element_by_css_selector(
                "input[value='Find Shipper']").send_keys(Keys.ENTER)
            sleep(3)
            wait = WebDriverWait(driver, 100000000)
            wait.until(
                lambda driver:
                "Either an ID or Full Shipper information is required for a Shipper."
                in driver.page_source)
        except:
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_null"
            ).send_keys(Keys.ENTER)
            wait = WebDriverWait(driver, 10)
            wait.until(EC.alert_is_present())
            driver.switch_to_alert().accept()
        address1 = ""
        address2 = ""
        city = ""
        country = ""
        stateProv = ""
        zipPost = ""
        #     print(container.terminal)
        if container.terminal == "311":
            address1 = "C/O CSX BUFFALO"
            address2 = "257 LAKE AVE"
            city = "BUFFALO"
            country = "USA"
            stateProv = "New York"
            zipPost = "14206"
        elif container.terminal == "305":
            address1 = "C/O ASI TERMINAL"
            address2 = "MARSH ST"
            city = "NEWARK"
            country = "USA"
            stateProv = "New Jersey"
            zipPost = "07100"
        elif container.terminal == "306":
            address1 = "C/O APM TERMINALS"
            address2 = "5080 MCLESTER STEET"
            city = "NEWARK"
            country = "USA"
            stateProv = "New Jersey"
            zipPost = "07100"
        elif container.terminal == "664":
            address1 = "C/O NEW YORK CONTAINER TERMINAL"
            address2 = "WESTERN AVE"
            city = "STATEN ISLAND"
            country = "USA"
            stateProv = "New York"
            zipPost = "10303"
        elif container.terminal == "309":
            address1 = "C/O PACKER TERMINAL"
            address2 = "3301 S COLUMBUS BLVD"
            city = "PHILADELPHIA"
            country = "USA"
            stateProv = "Pennsylvania"
            zipPost = "19148"
        elif container.terminal == "330":
            address1 = "C/O MAHER TERMINAL"
            address2 = "1260 CORBIN STREET"
            city = "NEWARK"
            country = "USA"
            stateProv = "New Jersey"
            zipPost = "07201"
        elif container.terminal == "304":
            address1 = "C/O GLOBAL TERMINAL"
            address2 = "302 PORT JERSEY BLVD."
            city = "BAYONNE"
            country = "USA"
            stateProv = "New Jersey"
            zipPost = "07305"
        else:
            address1 = ""
            address2 = ""
            city = ""
            country = ""
            stateProv = ""
            zipPost = ""

        if not address1 == "":
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_CONSIGNEE_ADDRESS_STREET"
            ).send_keys(address1)
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_CONSIGNEE_ADDRESS_STREET2"
            ).send_keys(address2)
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_CONSIGNEE_ADDRESS_CITY"
            ).send_keys(city)
            Select(
                driver.find_element_by_id(
                    "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_CONSIGNEE_ADDRESS_COUNTRY"
                )).select_by_visible_text(country)
            #         try:
            Select(
                driver.find_element_by_id(
                    "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_CONSIGNEE_ADDRESS_REGION"
                )).select_by_visible_text(stateProv)
        #         except:
        #             wait = WebDriverWait(driver, 100000000)
        #             wait.until(lambda driver: "Create Standard Shipment for another Carrier" in driver.page_source)
        #             return
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_CONSIGNEE_ADDRESS_ZIP"
        ).send_keys(zipPost)
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_STANDARDSHIPMENT_CONSIGNEE_NAME"
        ).send_keys(Keys.ENTER)

        while not GetKeyState(13) < 0:
            if GetKeyState(27) < 0:
                raise AssertionError

#             True

        Select(
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_standardShipmentEquipmentType"
            )).select_by_visible_text("Create One Time")
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_standardShipmentEquipmentType"
        ).send_keys(Keys.ENTER)

        sizeSelect = ""
        if container.size == "40":
            sizeSelect = "40ft ClosedTopSeaCnt"
        elif container.size == "20":
            sizeSelect = "20ft ClosedTopSeaCnt"

        Select(
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_SHIPMENTEQUIPMENT_TYPE")
        ).select_by_visible_text(sizeSelect)
        #     print(container.containerNumber[:4] + container.containerNumber[5:11] + container.containerNumber[12:13])
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_SHIPMENTEQUIPMENT_TRANSPORTID"
        ).send_keys(container.containerNumber)
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_null"
        ).send_keys(Keys.ENTER)

        #     sleep(10)
        driver.find_element_by_xpath(
            "//form[@name='PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_createStandardShipment']/table/tbody/tr[2]/td/fieldset[6]/table/tbody/tr/td/a"
        ).send_keys(Keys.ENTER)
        # #
        container.pieces = container.pieces.replace(',', "")
        elem = driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDCOMMODITY_SHIPMENTQUANTITY"
        )
        elem.clear()
        elem.send_keys(container.pieces)
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDCOMMODITY_QUANTITYUOM"
        ).send_keys(Keys.ENTER)
        #
        while not GetKeyState(13) < 0:
            if GetKeyState(27) < 0:
                raise AssertionError
    #

        elem = driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDCOMMODITY_WEIGHT")
        elem.clear()
        container.weight = container.weight.replace(',', "")
        index = container.weight.rfind(".")
        if index > 0:
            container.weight = container.weight[:index]
        elem.send_keys(container.weight)
        Select(
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDCOMMODITY_WEIGHTUOM"
            )).select_by_visible_text('Kilograms')

        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDCOMMODITY_DESCRIPTION"
        ).send_keys(container.description)
        elem = driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDCOMMODITY_VALUE")
        elem.clear()
        elem.send_keys(Keys.ENTER)

        while not GetKeyState(13) < 0:
            if GetKeyState(27) < 0:
                raise AssertionError

        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_AvailableINBONDCOMMODITY_HTSNUMS"
        ).send_keys(Keys.ENTER)
        sleep(1)

        while not GetKeyState(13) < 0:
            if GetKeyState(27) < 0:
                raise AssertionError

        HS = driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_AvailableINBONDCOMMODITY_HTSNUMS"
        ).get_attribute("value")
        zeroes = ""
        if len(HS) < 10:
            for _ in range(10 - len(HS)):
                zeroes += "0"

        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_AvailableINBONDCOMMODITY_HTSNUMS"
        ).send_keys(zeroes)

        driver.execute_script(
            "arguments[0].click();",
            driver.find_element_by_css_selector(
                'img[src*="/ace1/wps/PA_Shipment/images/right_single.gif"]'))
        #         driver.find_element_by_css_selector('img[src*="/ace1/wps/PA_Shipment/images/right_single.gif"]').send_keys(Keys.ENTER)

        #         driver.find_element_by_css_selector('img[src*="/ace1/wps/PA_Shipment/images/right_single.gif"]').click()
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_null"
        ).send_keys(Keys.ENTER)
        #
        Select(
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_standardShipmentEquipmentType"
            )).select_by_visible_text("Conveyance")
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_standardShipmentEquipmentType"
        ).send_keys(Keys.ENTER)
        #         didntwork = True
        #         while didntwork:
        #             try:
        wait = WebDriverWait(driver, 100000000)
        wait.until(lambda driver: "Conveyance</t" in driver.page_source)

        Select(
            driver.find_element_by_id(
                "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDSHIPMENT_ENTRYTYPE"
            )).select_by_visible_text("Transportation and Exportation")
        #                 while not "Transportation and Exportation" in elem.first_selected_option.text:
        #                     failed = True
        #                     while failed:
        #                         try:
        #     Select(elem).select_by_visible_text("Transportation and Exportation")
        #                             failed = False
        #                         except:
        #                             pass
        #                     elem = Select(driver.find_element_by_id("PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDSHIPMENT_ENTRYTYPE"))
        #
        #                 didntwork=False
        #             except:
        #                 pass

        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDSHIPMENT_INBONDDESTINATION"
        ).send_keys(portCode[0])
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDSHIPMENT_BONDEDCARRIER"
        ).send_keys("98-066177700")
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDSHIPMENT_INBONDNUMBER"
        ).send_keys(container.bond)

        date = datetime.now()
        date = (date + timedelta(days=14)).strftime('%m/%d/%Y')

        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDSHIPMENT_ESTDATEOFUSDEPARTURE"
        ).send_keys(date)
        driver.find_element_by_id(
            "PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_INBONDSHIPMENT_FOREIGNPORTOFDESTINATION"
        ).send_keys(Keys.ENTER)
        wait.until(
            lambda driver: "Create Standard Shipment for another Carrier" in
            driver.page_source)
    except AssertionError:
        pass
    except:
        top = Tk()
        L1 = Label(
            top,
            text=
            "Something went wrong. Either complete the rest of this T&E manually,\n or cancel and restart."
        )
        L1.config(font=("Courier", 30))
        L1.grid(row=0, column=0)
        L2 = Label(top, text=exc_info())
        #         L2.config(font=("Courier", 30))
        L2.grid(row=1, column=0)

        def callbackDM():
            top.destroy()

        MyButton4 = Button(top, text="OK", width=20, command=callbackDM)
        MyButton4.grid(row=2, column=0)
        MyButton4.config(font=("Courier", 30))
        popUp(top, w=1700, h=200, widget=MyButton4)

#     while not GetKeyState(13)<0 and not "Create Standard Shipment for another Carrier" in driver.page_source:
#         if GetKeyState(13)<0:
#             driver.find_element_by_id("PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_PC_7_CHMCHJ3VMJ3L502FK9QRJ710G2000000_null").send_keys(Keys.ENTER)
#     while not "Create Standard Shipment for another Carrier" in driver.page_source:
#         try:
    wait = WebDriverWait(driver, 100000000)
    wait.until(lambda driver: "Create Standard Shipment for another Carrier" in
               driver.page_source)
    {
        'NAME':
        'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME':
        'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
SIMPLE_JWT = {
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.SlidingToken',),  # trailing comma makes this a tuple
    'SLIDING_TOKEN_LIFETIME': timedelta(minutes=1)
}
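Note the trailing comma added to AUTH_TOKEN_CLASSES above: without it, the parentheses are just grouping and the setting is a plain string rather than a one-element tuple. A quick illustration:

not_a_tuple = ('rest_framework_simplejwt.tokens.SlidingToken')
a_tuple = ('rest_framework_simplejwt.tokens.SlidingToken',)

print(type(not_a_tuple).__name__)  # str
print(type(a_tuple).__name__)      # tuple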
Example #24
# print today's date
# print yesterday's date
# ask a user to enter a date
# print the date of one week from the date entered
from datetime import datetime, timedelta
print("Today's date is")
print(datetime.now().date())
one_day = timedelta(days=1)
Today = datetime.now()
yesterday = Today - one_day
print("date of yesterday is:")
print(yesterday.date())
one_week = timedelta(weeks=1)
next_week = Today + one_week
print("Next week is")
print(next_week)
def getDailyFundementalUpdate(runType):
    start_time = time.time()
    
    tickerPath = 'C:\\Users\\Nick\\Documents\\project MONEY\\tickers\\'
    fls = pd.Series(os.listdir(tickerPath))
    # tickDate = max(fls[fls.str.contains('tickers_')].replace({'tickers_':'','.csv':''}, regex= True))
    # tickerFile = tickerPath + 'tickers_' + tickDate + '.csv'
    #dBase = 'C:\\Users\\Nick\\Documents\\project MONEY\\data\\PJMdataBase.db'
    dBase = 'C:\\Users\\Nick\\Documents\\project MONEY\\data\\yahooDaily-postAPIchange.db'
    
    fundementalFile = 'C:\\Users\\Nick\\Documents\\project MONEY\\fundementalSymbols.csv'
    #yahooFundementalHistoryFile = 'C:\\Users\\Nick\\Documents\\project MONEY\\data\\dataBase_yahoo_fundementals.csv'
    
    tme = datetime.now()
    # yahooFundementalDayFile = 'C:\\Users\\Nick\\Documents\\project MONEY\\data\\dateFiles\\dataBase_yahoo_fundementals_' + \
    #                             str(np.datetime64(tme.date())).replace('-','') +  '.csv'
    
    #tickersBase = readTicker(dBase = dBase)
    #tickersBase = readCSV(tickerFile)
    
    exchangeMap = dBaseAction(dBase, ''' SELECT * from exchangeMap ''')[0]
    #tickersBase = dBaseAction(dBase, ''' SELECT * from tickers ''')[0]
    tickersBase = dBaseAction(dBase, ''' SELECT * from tickersNew ''')[0]
    #tickersBase = tickersBase[ ~tickersBase.ticker.isin(tickersBase1.ticker)]
    testYahooDaily = readExclude(dBase = dBase, table = 'yahooDailyExclude')
    
    tickers = tickersBase[['category', 'exchange', 'name', 'ticker', 'type']].drop_duplicates().reset_index(drop = True)
    tickers = tickers[tickers.type.isin(['stock', 'etf', 'index', 'mutual_fund','future','warrant','bond','currency'])]
    tickers = tickers.drop_duplicates().reset_index(drop = True)
    tickers = tickers[['exchange', 'ticker', 'type']].drop_duplicates().reset_index(drop = True)
    
    if runType =='fundemental':
        fundementals = readCSV(fundementalFile)
        #yahooFundementalHistory = readCSV(yahooFundementalHistoryFile)
        #url='http://finance.yahoo.com/d/quotes.csv?s=AAPL+GOOG+MSFT&f=nab'
        
        yahooFundemental = []
        couldntFind = []
        
        track = 0
        
        nw = np.datetime64(datetime.now().date())
        sets = int(len(tickers.ticker)/199)        
        for k in range(int(sets+1)):
            print(str(k) + ' of ' + str(sets) + ' sets')
            tickerSet = tickers.ticker.iloc[(k*199):((k*199)+min(len(tickers.ticker)-k*199,199)-1)].reset_index(drop=True)
            typeSet = tickers.type.iloc[(k*199):((k*199)+min(len(tickers.ticker)-k*199,199)-1)].reset_index(drop=True)
            tickerJoin = '+'.join(tickerSet)
            time.sleep(random.random()*5)
            dataNew, track = downloadYahoo(ticker = tickerJoin, symbols=fundementals, track = track)
    
            try:
                couldntFind.append(tickerSet[ ~tickerSet.isin(dataNew.symbol)])
            except:
                pass
            
            tickerSet = dataNew.symbol
            dataNew['type'] = typeSet = [tickers.type[ tickers.ticker == x].iloc[0] for x in dataNew.symbol.tolist()]
            dataNew['ID'] = dataNew.timestamp + '_' + dataNew.symbol
            yahooFundemental.append(dataNew)
            
            ''' filter ignored tickers '''

            print("couldn't find the following tickers when calling csv:")
            print(couldntFind)
            
            dataDrop = pd.concat(yahooFundemental).drop_duplicates().reset_index(drop=True).sort_values(by = ['symbol','timestamp'])
            
            writeDB(dBase = dBase, name = 'yahooFundementals', data = dataDrop, 
                    createArgs= '("' + '","'.join(dataDrop.columns.tolist()) + '", PRIMARY KEY("ID"), UNIQUE("ID"))',
                    indexColName = 'symbol',
                    args ='("' + '","'.join(dataDrop.columns.tolist()) + '")' )   
            
    elif runType == 'dailyData':
        ''' As of 01/11/2017, yahoo API discontinued. Can only trade:
            - US, UK, Canadian and German stocks
            - UK ETFs
            - UK indices?
            Thus filter these out from relevant exchanges, and include only the 3 letter currencies
        '''
        
        ccys = tickers[ tickers.ticker.str.contains('=X')].reset_index(drop = True)
        ccys = ccys[ ccys.ticker.str.len() == 5].reset_index(drop = True)
        tickers = pd.merge(tickers, exchangeMap[['yahoo_exchange','Country']], left_on = 'exchange', right_on = 'yahoo_exchange')
        tickers = tickers[ (tickers.type.isin(['stock','index','etf']) & (tickers.Country == 'UK')) | 
                          (tickers.type.isin(['stock','etf']) & (tickers.Country == 'USA')) |
                          (tickers.type.isin(['stock']) & (tickers.Country == 'Germany')) |
                          (tickers.type.isin(['stock']) & (tickers.Country == 'Canada'))]
        filterExchange = ['DJI','NAS','NMS','NYQ','NYS',##USA
                           'FGI','FSI','LSE',##UK
                           'DUS','FRA','GER',##Germany
                           'TOR','VAN']##Canada
        tickers = tickers[ tickers.exchange.isin(filterExchange)].reset_index(drop = True)
        tickers = tickers[ ~tickers.ticker.isin(testYahooDaily.tolist())].reset_index(drop = True)
        tickers = pd.concat([tickers, ccys]).reset_index(drop = True)
        
        ''' Need market cap and volume info? '''
        
        ## format like fundementals:
        base = dBaseAction(dBase, ''' Select * from yahooFundementals where symbol = "TSLA"''')[0]
        base = pd.DataFrame(columns = base.columns)
        track = 0
        dateTo = datetime.now()
        ##get 10 days in case of long holiday weekends etc
        dateFrom = datetime.now() - timedelta(10)
        yahooDailyWrite = []
        notAvailable = []
        for i in range(len(tickers.ticker)):   
            time.sleep(random.random()*1.5)
            ##### YAHOO daily data
            dataNew, track = downloadYahoo(ticker = tickers.ticker[i], dailyDateRange = np.array([dateFrom, dateTo]),track = track, adjust = False)
            ## We now have the full set of past time series; if the available series stops more than 10 days ago we ignore it
            if (dataNew is None):
                notAvailable.append(tickers.ticker[i])
            elif (dataNew.timestamp.values.astype('datetime64[D]').max() < (np.datetime64(datetime.now()) - np.timedelta64(10,'D'))):
                notAvailable.append(tickers.ticker[i])
            else:
                print( ' '.join([str(i),'out of',str(len(tickers.ticker)),'-',tickers.ticker[i]]) + ': - Yahoo Daily - OK')
                dataNew['type'] = tickers.type[i]
                dataNew = dataNew.sort_values('timestamp')
                dataNew['ID'] = dataNew.timestamp + '_' + dataNew.ticker
                dataNew = dataNew.rename(columns = {'ticker':'symbol','close':'previous_close','adj_close':'last_trade_price_only','high':'days_high','low':'days_low'})
                dataNew = pd.concat([base,dataNew])
                #dataNew = dataNew.drop('adj_close', axis =1)
                yahooDailyWrite.append(dataNew)
            
            ## once get to 4000, save and take a break to calm servers
            if ((i%4000 == 0) & (i > 0)) | (i == (len(tickers.ticker)-1)):
                dataDrop = pd.concat(yahooDailyWrite).reset_index(drop = True)
                writeDB(dBase = dBase, name = 'yahooFundementals', data = dataDrop, 
                createArgs= '("' + '","'.join(dataDrop.columns.tolist()) + '", PRIMARY KEY("ID"), UNIQUE("ID"))',
                indexColName = 'symbol',
                args ='("' + '","'.join(dataDrop.columns.tolist()) + '")' )
                
                ## add tickers that don't have data to a list we can import to ignore
                writeDB(dBase = dBase, name = 'yahooDailyExclude', data =  pd.DataFrame(notAvailable, columns = ['ticker']).drop_duplicates().reset_index(drop=True), 
                        createArgs= '(ticker, PRIMARY KEY(ticker), UNIQUE(ticker))',
                        args ='(ticker)' )
                
                yahooDailyWrite = []
                notAvailable = []
                time.sleep(60*10)
 
    print(time.time() - start_time)
    
    ## run the incubator once data collection is finished
    reportingRun('True','True')
Example #26
from datetime import timedelta
from airflow.operators.papermill_operator import PapermillOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago  # start_date below relies on days_ago
import time, datetime, os

# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': days_ago(2),
    'email': ['*****@*****.**'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 0,
    'retry_delay': timedelta(minutes=1),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
    # 'wait_for_downstream': False,
    # 'dag': dag,
    # 'sla': timedelta(hours=2),
    # 'execution_timeout': timedelta(seconds=300),
    # 'on_failure_callback': some_function,
    # 'on_success_callback': some_other_function,
    # 'on_retry_callback': another_function,
    # 'sla_miss_callback': yet_another_function,
    # 'trigger_rule': 'all_success'
}
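A minimal sketch of wiring these default_args into a DAG with one task; the dag_id and the callable are illustrative, not from the original snippet:

from airflow import DAG

with DAG('example_notebook_run',
         default_args=default_args,
         schedule_interval=timedelta(days=1)) as dag:

    def say_hello():
        print('hello from airflow')

    hello_task = PythonOperator(task_id='say_hello', python_callable=say_hello)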
Example #27
def inserirsessao(request, id):

    user_check_var = user_check(request=request,
                                user_profile=[ProfessorUniversitario])
    if user_check_var.get('exists') == False:
        return user_check_var.get('render')

    userId = user_check_var.get('firstProfile').utilizador_ptr_id
    atividade = Atividade.objects.filter(
        id=id, professoruniversitarioutilizadorid=userId)

    atividadecheck = atividade.first()
    sessoes = Sessao.objects.filter(atividadeid=atividadecheck)
    for sessao in sessoes:
        if sessao.vagas != atividadecheck.participantesmaximo:
            return render(request=request,
                          template_name='mensagem.html',
                          context={
                              'tipo': 'error',
                              'm': 'Não tem permissões para esta ação!'
                          })

    if atividade.exists():
        today = datetime.now(timezone.utc)
        diaaberto = Diaaberto.objects.get(
            datapropostasatividadesincio__lte=today,
            dataporpostaatividadesfim__gte=today)
        diainicio = diaaberto.datadiaabertoinicio.date()
        diafim = diaaberto.datadiaabertofim.date()
        totaldias = diafim - diainicio + timedelta(days=1)
        dias_diaaberto = []
        for d in range(totaldias.days):
            dias_diaaberto.append(diainicio + timedelta(days=d))
        horariosindisponiveis = []
        disp = []
        atividadeid = Atividade.objects.get(id=id)
        sessoes = Sessao.objects.all().filter(atividadeid=id)
        check = len(sessoes)
        if request.method == "POST":
            if 'new' in request.POST:
                diasessao = request.POST["diasessao"]
                print(diasessao)
                inicio = request.POST['horarioid']
                splitinicio = inicio.split(":")
                print(splitinicio)
                duracaoesperada = atividadeid.duracaoesperada
                hfim = horariofim(splitinicio, duracaoesperada)
                horario = Horario.objects.filter(
                    inicio=request.POST['horarioid'], fim=hfim).first()
                if horario is None:
                    new_Horario = Horario(inicio=inicio, fim=hfim)
                    new_Horario.save()
                else:
                    new_Horario = horario
                new_Sessao = Sessao(
                    vagas=Atividade.objects.get(id=id).participantesmaximo,
                    ninscritos=0,
                    horarioid=Horario.objects.get(id=new_Horario.id),
                    atividadeid=Atividade.objects.get(id=id),
                    dia=diasessao)
                if atividadeid.estado != "nsub":
                    atividadeid.estado = "Pendente"
                atividadeid.save()
                new_Sessao.save()
                return redirect('atividades:inserirSessao', id)
        return render(request=request,
                      template_name='atividades/proporAtividadeSessao.html',
                      context={
                          'horarios': "",
                          'sessions_activity': Sessao.objects.all().filter(atividadeid=id),
                          'dias': dias_diaaberto,
                          'check': check,
                          "id": id
                      })
    else:
        return render(request=request,
                      template_name='mensagem.html',
                      context={
                          'tipo': 'error',
                          'm': 'Não tem permissões para esta ação!'
                      })
Example #28
for tz_descr in map(str.split, tz_str.split('\n')):
    tz_offset = int(float(tz_descr[0]) * 3600)
    for tz_code in tz_descr[1:]:
        tzd[tz_code] = tz_offset

if '__main__' == __name__:
    filenames = [ ]
    for f in listdir(getcwd()):
         if isfile(join(getcwd(),f)):
             if f.split(".")[-1] == 'log':
                 filenames.append(f)
    last_timestamp = dict()
    counter = {f:0 for f in filenames}
    errors = []
    max_lag = 0
    window_start = datetime.now(tzlocal()) + timedelta(days=-100)
    linebuffer = ''
    for line in fileinput.input():
        if line != '\n':
            linebuffer += line
        else:
            if not linebuffer.strip():  # skip empty records between blank lines
                continue
            print(linebuffer)
            msg = json.loads(linebuffer)
            if 'note' in msg and 'at' in msg:
                ts = msg['at']
                time_received = dp.parse(ts, tzinfos=tzd)
                time_logged = datetime.now(tzlocal())
                
                if ((time_received - window_start)/timedelta(seconds=1) > 100):
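The comparison on the last line expresses a lag in seconds by dividing one timedelta by timedelta(seconds=1); a standalone illustration of that idiom, which is equivalent to .total_seconds():

from datetime import timedelta

lag = timedelta(minutes=2, seconds=30)
print(lag / timedelta(seconds=1))   # 150.0
print(lag.total_seconds())          # 150.0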
Example #29
                    metadata['energy'].append(np.nan)
                    metadata['instrumentalness'].append(np.nan)
                    metadata['mode'].append(np.nan)
                    metadata['loudness'].append(np.nan)
                    metadata['speechiness'].append(np.nan)
                    metadata['tempo'].append(np.nan)
                    metadata['time_signature'].append(np.nan)
                    metadata['valence'].append(np.nan)

        except Exception:
            traceback.print_exc()

        done = i + 1
        if (done % 100 == 0):
            spent = time.time() - start
            spent_r = str(timedelta(seconds=spent))
            print(
                '-- DONE FOR {} BATCHES! {} MORE BATCHES TO GO. TIME SPENT SO FAR: {}'
                .format(done, (batches_count - done), spent_r))

    if not os.path.exists(FOLDER_OUT):
        os.makedirs(FOLDER_OUT)

    name = FOLDER_OUT + 'all_tracks_metadata.csv'
    inout.ensure_dir(name)
    print('SAVING THE RESULTS: ' + name)
    export = pd.DataFrame(metadata)
    export = export[[
        'track_id', 'popularity', 'acousticness', 'danceability', 'energy',
        'instrumentalness', 'mode', 'loudness', 'speechiness', 'tempo',
        'time_signature', 'valence'
Exemple #30
0
def get_today_date():
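    # Note: despite its name, this returns the previous day's date as a "YYYYMMDD" string.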
    return (datetime.now() - timedelta(days=1)).strftime("%Y%m%d")
Exemple #31
0
cursor.execute(
    'SELECT count(*) FROM saturn_fin.sovcombank_products WHERE inserted_date > %s '
    'AND status_code != 2 AND status_code != 6', (DATE_START_COUNT, ))
rows = cursor.fetchall()
statistics_after['НЕ одобренные и НЕ активированные'] = rows[0][0]

print('Стало:')
print(statistics_after['Одобренные'], '\t\t',
      statistics_after['Активированные'], '\t\t\t',
      statistics_after['Скрытые'], '\t\t', statistics_after['Дебетовые'], '\t',
      statistics_after['НЕ одобренные и НЕ активированные'])
print('ИЗМЕНЕНИЯ:')
print(
    statistics_after['Одобренные'] - statistics_before['Одобренные'], '\t\t',
    statistics_after['Активированные'] - statistics_before['Активированные'],
    '\t\t\t', statistics_after['Скрытые'] - statistics_before['Скрытые'],
    '\t\t', statistics_after['Дебетовые'] - statistics_before['Дебетовые'],
    '\t', statistics_after['НЕ одобренные и НЕ активированные'] -
    statistics_before['НЕ одобренные и НЕ активированные'])

# Up to which date to set the status "Negative result"
# On September 15 we set statuses on the June and July applications; on October 15, August is added...
date_end_otkaz = datetime.now() - timedelta(days=15) - relativedelta(months=1) + \
                 relativedelta(day=1, hour=0, minute=0, second=0, microsecond=0)
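# relativedelta(day=1, hour=0, minute=0, second=0, microsecond=0) snaps the result to
# midnight on the first day of that month.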
cursor = dbconn.cursor()
cursor.execute(
    'UPDATE saturn_fin.sovcombank_products SET status_code = 5 WHERE status_code IN (0,1,4,200) '
    'AND inserted_date < %s', (date_end_otkaz, ))
dbconn.commit()
dbconn.close()
Exemple #32
0
def test_new_passpoints2(request):
    is_invalid = 'is-invalid'
    active_user = PasswordUser.objects.get(username = request.user)
    login_info = LoginInfo.objects.filter(uid=active_user).order_by('login_period')
    login_period = login_info.count()
    test_period = login_period + 1
    img_fin = request.POST.get('img_fin')
    oldpps = NewPpInfo.objects.filter(uid=active_user, img_number=img_fin)
    oldpp_len = len(oldpps)
    confirm_info = ConfirmInfo.objects.filter(uid=active_user).last()
    confirm_date = confirm_info.date_confirmed
    if LoginInfo.objects.filter(uid=active_user).exists():
        last_login = login_info.latest('date_login')
        last_login = last_login.date_login
    today = timezone.now()
    if login_period == 0:
        d = 1
    elif login_period == 1:
        d = 3
    else:
        d = 7
    test_date = confirm_date + timedelta(days=d)
    newpps = NewPpInfo.objects.filter(uid=active_user).order_by('pk').last()
    img_count = newpps.img_number
    img_id = oldpps.first().cp_id.iid.pk
    img_url = oldpps.first().cp_id.iid.image.url
    image = Image.objects.get(pk=img_id)
    context = {}
    context['img_fin'] = img_fin
    context['oldpps'] = oldpps
    context['test_period'] = test_period
    context['oldpp_len'] = oldpp_len
    context['img_url'] = img_url
    context['image'] = image
    if login_period < 3:
        if request.method == 'POST':
            time_prev = request.POST.get('time_prev')
            if time_prev == '':
                time_prev = 0
            else:
                time_prev = int(time_prev)
            time_create = int(request.POST.get('time'))
            time_create = time_create + time_prev
            m = math.floor(time_create / 60)
            s = time_create % 60
            time = datetime.time(0,m,s)
            operation = request.POST.get('operation')
            cpCount = request.POST.get('cpCount')
            
            if operation == 'retry':
                if cpCount == '':
                    context['time_prev'] = time_create
                    return render(request, 'gp/test_newpp2.html', context)
                else:
                    cpCount = int(cpCount)
                key = request.POST.get('image')
                image = Image.objects.get(pk=key)
                i = 1
                while i <= cpCount:
                    x = int(request.POST.get("x"+str(i)))
                    y = int(request.POST.get("y"+str(i)))
                    r = int(request.POST.get("r"+str(i)))
                    g = int(request.POST.get("g"+str(i)))
                    b = int(request.POST.get("b"+str(i)))
                    a = int(request.POST.get("a"+str(i)))
                    cp = int(request.POST.get("cp"+str(i)))
                    i+=1
                    new_cpfail = ClickPointFail(iid=image, uid=active_user, x_location=x, y_location=y, color=r+g+b+a, order=cp, img_number=img_fin)
                    new_cpfail.save()
                context['time_prev'] = time_create
                return render(request, 'gp/test_newpp2.html', context)
            else:
                if cpCount == '':
                    context['is_invalid'] = is_invalid
                    context['time_prev'] = time_create
                    return render(request, 'gp/test_newpp2.html', context)
                else:
                    cpCount = int(cpCount)
                if cpCount != oldpp_len:
                    i = 1
                    while i <= cpCount:
                        x = int(request.POST.get("x"+str(i)))
                        y = int(request.POST.get("y"+str(i)))
                        r = int(request.POST.get("r"+str(i)))
                        g = int(request.POST.get("g"+str(i)))
                        b = int(request.POST.get("b"+str(i)))
                        a = int(request.POST.get("a"+str(i)))
                        cp = int(request.POST.get("cp"+str(i)))
                        i+=1
                        new_cpfail = ClickPointFail(iid=image, uid=active_user, x_location=x, y_location=y, color=r+g+b+a, order=cp, img_number=img_fin)
                        new_cpfail.save()
                    context['is_invalid'] = is_invalid
                    context['time_prev'] = time_create
                    return render(request, 'gp/test_newpp2.html', context)
                else:
                    image = request.POST.get('image')
                    image = Image.objects.get(pk=img_id)
                    i = 1
                    while i <= cpCount:
                        x = int(request.POST.get("x"+str(i)))
                        y = int(request.POST.get("y"+str(i)))
                        r = int(request.POST.get("r"+str(i)))
                        g = int(request.POST.get("g"+str(i)))
                        b = int(request.POST.get("b"+str(i)))
                        a = int(request.POST.get("a"+str(i)))
                        cp = int(request.POST.get("cp"+str(i)))
                        i+=1
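                        # Accept the click only if it lands within a 7-pixel tolerance box
                        # around the stored click point; otherwise record it as a failure.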
                        check_pp = ClickPoint.objects.get(uid=active_user, order=cp, iid=image, img_number=img_fin)
                        x_max = int(check_pp.x_location) + 7
                        x_min = int(check_pp.x_location) -7
                        y_max = int(check_pp.y_location) + 7
                        y_min = int(check_pp.y_location) - 7
                        if x > x_max or x < x_min or y > y_max or y < y_min:
                            new_cpfail = ClickPointFail(iid=image, uid=active_user, x_location=x, y_location=y, color=r+g+b+a, order=cp, img_number=img_fin)
                            new_cpfail.save()
                            context['is_invalid'] = is_invalid
                            context['time_prev'] = time_create
                            return render(request, 'gp/test_newpp2.html', context)
                    if int(img_fin) == int(img_count):
                        new_submit = SubmitInfo(uid=active_user, time_submit=time)
                        new_submit.save()
                        new_login = LoginInfo(uid=active_user, time_login=time, login_period=test_period)
                        new_login.save()
                        return render(request, 'gp/login_success.html', {'active_user': active_user, 'test_period': test_period, 'test_date': test_date})
                    else:
                        img_fin = int(img_fin) + 1
                        img_key = NewPpInfo.objects.filter(uid= active_user, img_number=img_fin).first()
                        img_key = img_key.cp_id.iid.pk
                        img = Image.objects.get(pk=img_key)
                        images = Image.objects.all().order_by('?')[:8]
                        return render(request, 'gp/test_newpp.html', {'img': img, 'images': images, 'test_period': test_period, 'img_fin': img_fin})
        else:
            context['time_prev'] = 0  # no elapsed time yet on a plain GET request
            return render(request, 'gp/test_newpp2.html', context)
    else:
        return redirect('/logout')
Exemple #33
0
from collections import deque
from datetime import datetime, timedelta

data = input().split(';')
robots = deque([])
products = deque([])

start_time = datetime.strptime(input(), '%H:%M:%S')
time_add = timedelta(seconds=1)
product_time = start_time + time_add

robot = {}
# Adding robots as dictionary to the robots deque
for el in data:
    robot = {}
    name, time = el.split('-')
    time = int(time)
    robot['name'] = name
    robot['processing_time'] = time
    robot['available_at'] = product_time
    robots.append(robot)

product = input()
# Adding products to the products deque
while product != 'End':
    products.append(product)
    product = input()

# Looping through product line and robots to calculate processing time for each product
while len(products) > 0:
    current_product = products.popleft()
Exemple #34
0
                year_val + '-05-28T00:00:00', year_val + '-05-29T00:00:00'
            ]
            # past_dates=[year_val+'-05-28',year_val+'-05-29']
        elif dt == (year_val + '-07-01T00:00:00') or dt == (
                year_val + '-08-01T00:00:00') or dt == (year_val +
                                                        '-09-01T00:00:00'):
            count_months -= 1
        elif dt == (year_val + '-10-01T00:00:00'):
            break

        if dt != (year_val + '-06-01T00:00:00') and count_months != 0:
            past_dates[0] = past_dates[1]
            prev_date = str(past_dates[0]).replace('T', '')
            #print(prev_date)
            next_date = datetime.strptime(prev_date, '%Y-%m-%d%H:%M:%S')
            next_date = next_date + timedelta(1)
            next_date = datetime.strftime(next_date, '%Y-%m-%d')
            next_date = next_date + 'T00:00:00'
            past_dates[1] = next_date

        lat = 5.0
        longitude = 65.0
        print(dt)
        dict_grid[dt] = {}
        list_values.append(dt)
        list_values_prev1.append(past_dates[0])
        list_values_prev2.append(past_dates[1])

        if dt == '2013-06-01T00:00:00':
            list_header.append('time')
            list_header_prev1.append('past_day1_time')
Exemple #35
0
        os.environ["DYNAMO_TABLE"] = 'prod-visual-schedules-data-table'

    def what_is_my_schedule(self, student: str, date: datetime):
        calculator = Calculator(student)
        print('What is the School Schedule for {} ?'.format(calculator.name))
        try:
            sched = calculator.is_there_school(date)
            # print (sched)
            print(
                "  ===================== {} School Schedule For {} ========================================"
                .format(sched.get("name"), date.strftime("%A, %b %d, %Y")))
            if sched.get("schedule") != None:
                schedule = sched.get("schedule")
                calculator.print_schedule(schedule)
            else:
                print(sched.get("message"))
            print("            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        except ValueError as e:
            print("COULD NOT FIND SCHEDULE!")
            print(e)


if __name__ == "__main__":
    question = Questioner()

    question.what_is_my_schedule("Delia", (datetime.now() + timedelta(days=1)))
    question.what_is_my_schedule("Kiera", (datetime.now() + timedelta(days=1)))
    question.what_is_my_schedule("Kiera",
                                 (datetime.now() + timedelta(days=24)))
    question.what_is_my_schedule("Delia", datetime.now())
    question.what_is_my_schedule("Kiera", datetime.now())
Exemple #36
0
def split_data_slice(data, output_file, slice_id, days_offset, days_train,
                     days_test):
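    # Slice the event log into a training window of days_train days followed by a test
    # window of days_test days, starting days_offset days after the first event.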

    data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
    data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)

    print(
        'Full data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'
        .format(slice_id, len(data), data.SessionId.nunique(),
                data.ItemId.nunique(), data_start.isoformat(),
                data_end.isoformat()))

    start = datetime.fromtimestamp(data.Time.min(),
                                   timezone.utc) + timedelta(days_offset)
    middle = start + timedelta(days_train)
    end = middle + timedelta(days_test)

    #prefilter the timespan
    session_max_times = data.groupby('SessionId').Time.max()
    greater_start = session_max_times[
        session_max_times >= start.timestamp()].index
    lower_end = session_max_times[session_max_times <= end.timestamp()].index
    data_filtered = data[np.in1d(data.SessionId,
                                 greater_start.intersection(lower_end))]

    print(
        'Slice data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} / {}'
        .format(slice_id, len(data_filtered),
                data_filtered.SessionId.nunique(),
                data_filtered.ItemId.nunique(),
                start.date().isoformat(),
                middle.date().isoformat(),
                end.date().isoformat()))

    #split to train and test
    session_max_times = data_filtered.groupby('SessionId').Time.max()
    sessions_train = session_max_times[
        session_max_times < middle.timestamp()].index
    sessions_test = session_max_times[
        session_max_times >= middle.timestamp()].index

    train = data[np.in1d(data.SessionId, sessions_train)]

    print(
        'Train set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'
        .format(slice_id, len(train), train.SessionId.nunique(),
                train.ItemId.nunique(),
                start.date().isoformat(),
                middle.date().isoformat()))

    train.to_csv(output_file + '_train_full.' + str(slice_id) + '.txt',
                 sep='\t',
                 index=False)

    test = data[np.in1d(data.SessionId, sessions_test)]
    test = test[np.in1d(test.ItemId, train.ItemId)]

    tslength = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, tslength[tslength >= 2].index)]

    print(
        'Test set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} \n\n'
        .format(slice_id, len(test), test.SessionId.nunique(),
                test.ItemId.nunique(),
                middle.date().isoformat(),
                end.date().isoformat()))

    test.to_csv(output_file + '_test.' + str(slice_id) + '.txt',
                sep='\t',
                index=False)
Exemple #37
0
def getAveragesFilter(weekly, time, key, location, typeOfLocation, timeWindow, resolution, sortOperators):
    #This function calculates weekly averages, and also takes into account the previous 9 weeks 
    #as a simple FIR filter.
    
    #Function acts a little differently when the averages are calculated weekly.
    if weekly: 
        val = 2
    else:
        val = 1
    values = 0
    #This for loop loops the 10 weeks, that are used to calculate averages.
    for i in range(1,11):
        #Data is fetched from database. 7 days for weekly aves, 1 day for daily aves.
        if weekly:
            data = getFromDataBase(time, key, location, typeOfLocation, -7, sortOperators)
        else:
            data = getFromDataBase(time, key, location, typeOfLocation, -1, sortOperators)
            
        newData = [(item[0], item[1], item[2], item[3], item[4], item[5][:5], item[6]) for item in data]
        
        #The functions to calculate the averages are called during the first iteration of the for loop.
        if i == 1 or not values:
            if newData:
                if weekly:
                    values = calculateAveragesWeekly(0,0,0,0,0,resolution,0,1,newData)
                else:
                    values = calculateAveragesDaily(0,0,0,0,0,resolution,0,1,data)
                for line in values:
                    line[-1] += 1
        #After the first iteration, this else clause is used.  
        else:
            if newData:
                if weekly:
                    temp = calculateAveragesWeekly(0,0,0,0,0,resolution,0,1,newData)
                else:
                    temp = calculateAveragesDaily(0,0,0,0,0,resolution,0,1,newData)
                #The older the data, the less it weighs in the average.
                if i > 1 and i < 4:
                    weight = 0.1
                elif i < 7:
                    weight = 0.05
                else:
                    weight = 0.025
                
                for line in range(len(temp)):
                    values[line][val] += temp[line][val] * weight
                    values[line][val+1] += temp[line][val+1] * weight
                    values[line][val+2] += temp[line][val+2] * weight
                    values[line][val+3] += temp[line][val+3]
                    if temp[line][val]:
                        values[line][-1] += weight
                    
                   
        #Time is moved back 7 days (or 1 day) for the next iteration of the for loop.
        if weekly:    
            time = time - timedelta(days = 7)
        else:
            time = time-timedelta(days = 1)
    
    #Averages are calculated and the total weight is removed.
    print(values)
    for line in values:
        if line[-1]:
            line[val] = line[val] / line[-1]
            line[val+1] = line[val+1] / line[-1]    
            line[val+2] = line[val+2] / line[-1]
        line.pop(-1)
        
    return values
Exemple #38
0
    def get_stockData(self, fullUpdate=False, clean=True, filter=False):

        dBase = stockConfig.dBase

        # cut up to most recent date, accounting for weekends
        maxDate = datetime.now()
        if maxDate.isoweekday() in range(2, 7):
            maxDate = np.datetime64(maxDate).astype('datetime64[D]')
        elif maxDate.isoweekday() == 1:
            maxDate = np.datetime64(maxDate).astype('datetime64[D]') - 2
        elif maxDate.isoweekday() == 7:
            maxDate = np.datetime64(maxDate).astype('datetime64[D]') - 1

        ''' As of 01/11/2017, yahoo API discontinued. Can only trade:
            - US, UK, Canadian and German stocks
            - UK ETFs
            - UK indices?
            Thus filter these out from relevant exchanges, and include only the 3 letter currencies
        '''

        # new mongodb object
        mdb = MongoDB()
        # stockutils object
        su = StockUtils()

        tickers = su.getTickers(filter=filter)

        if fullUpdate:
            # update all market caps once a week; during the week, filter on appropriate market caps and those tickers
            #    that require reporting as "changes", and rerun to get recommendations, earnings dates, etc. every day
            data = getTickerYahooStats(np.sort(tickers.ticker.values))
            data.longName = data.longName.str.replace('\'', '')
            data.longName = data.longName.str.replace('\"', '')
            writeDBv2(dBase=dBase, name=stockConfig.yahooStatsTable, data=data,
                      createArgs='("' + '","'.join(
                          data.columns.tolist()) + '", PRIMARY KEY("ticker"), UNIQUE("ticker"))',
                      indexColName='ticker',
                      args='("' + '","'.join(data.columns.tolist()) + '")')

            # set single symbol level for fast read from database as a single table of data
            data[stockConfig.statsTable] = stockConfig.statsTable
            data = mdb.getMultiIndex(data, index=[stockConfig.statsTable])
            # save to mongoDB database in "tickers" library
            mdb.save(data, stockConfig.statsTable, append=True)

        # get unique dates so all pivots have the same index
        uniqueDates = dBaseAction(dBase, ''' select distinct timestamp from %s ''' % stockConfig.dailyTable)[0]
        dates = np.unique(uniqueDates).astype('datetime64[D]')
        dates.sort()

        dateTo = datetime.now()
        # get all data for exchanges that we can actually trade, clean and update everyday
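        # A full update re-downloads the complete history (since 1996) and rewrites the store;
        # otherwise only the last 10 days are fetched and appended.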
        if fullUpdate:
            dateFrom = datetime(1996, 1, 1)
            append=False
        else:
            dateFrom = dateTo - timedelta(10)
            append=True

        yahooDailyWrite = []
        notAvailable = []
        track = 0

        for i in range(len(tickers.ticker)):
            time.sleep(random.random() * 1.5)
            # -----------------
            # YAHOO daily data
            # -----------------
            dataNew, track = self.downloadYahoo(ticker=tickers.ticker[i], dailyDateRange=np.array([dateFrom, dateTo]),
                                                track=track, adjust=False)
            # Now we have the full set of past time series; if the available time series stops more than
            #   10 days ago, we ignore the ticker
            if dataNew is None:
                notAvailable.append(tickers.ticker[i])
            elif (dataNew.timestamp.values.astype('datetime64[D]').max() < (
                    np.datetime64(datetime.now()) - np.timedelta64(10, 'D'))):
                notAvailable.append(tickers.ticker[i])
            else:
                print(' '.join(
                    [str(i), 'out of', str(len(tickers.ticker)), '-', tickers.ticker[i]]) + ': - Yahoo Daily - OK')
                dataNew['type'] = tickers.type[i]
                dataNew = dataNew.sort_values('timestamp')
                dataNew['ID'] = dataNew.timestamp + '_' + dataNew.ticker
                yahooDailyWrite.append(dataNew)

            # once get to 4000, save and take a break to calm servers
            if ((i % 4000 == 0) & (i > 0)) | (i == (len(tickers.ticker) - 1)):
                dataDrop = pd.concat(yahooDailyWrite).reset_index(drop=True)
                dataToWrite = dataDrop.replace({np.nan: 'N/A', np.inf: 'N/A', -np.inf: 'N/A', 'inf': 'N/A', '-inf': 'N/A'}, regex=True)
                print('writing data...')
                writeDBv2(dBase=dBase, name=stockConfig.dailyTable, data=dataToWrite,
                          createArgs='("' + '","'.join(
                              dataDrop.columns.tolist()) + '", PRIMARY KEY("ID"), UNIQUE("ID"))',
                          indexColName='"ticker"',
                          args='("' + '","'.join(dataDrop.columns.tolist()) + '")')

                # set ticker data with multi index, ticker then date
                mongoData = copy(dataToWrite)
                mongoData = mongoData.drop(['ID'], axis=1)
                # set metaData as type
                meta_data = mongoData[['ticker', 'type']].drop_duplicates().set_index('ticker').T.to_dict()
                mongoData = mdb.getMultiIndex(mongoData.drop('type', axis=1), index=['ticker', 'timestamp'])
                # save to mongoDB database in "tickers" library
                mdb.save(mongoData, stockConfig.stock_daily, append=append, meta_data=meta_data)

                # create a separate table for FX rates to make reading FX data faster
                fxData = dataToWrite[dataToWrite.type == 'currency'].reset_index(drop=True)
                fxData = fxData[fxData.ticker.str.contains('=')].reset_index(drop=True)
                dataToWrite = fxData.replace(
                    {np.nan: 'N/A', np.inf: 'N/A', -np.inf: 'N/A', 'inf': 'N/A', '-inf': 'N/A'}, regex=True)
                if fxData.shape[0] > 0:
                    writeDBv2(dBase=dBase, name=stockConfig.dailyFXtable, data=dataToWrite,
                              createArgs='("' + '","'.join(
                                  fxData.columns.tolist()) + '", PRIMARY KEY("ID"), UNIQUE("ID"))',
                              indexColName='"ticker"',
                              args='("' + '","'.join(fxData.columns.tolist()) + '")')

                    # set ticker data with multi index, ticker then date
                    mongoData = copy(dataToWrite)
                    mongoData = mongoData.drop(['ID'], axis=1)
                    # set metaData as type
                    meta_data = mongoData[['ticker', 'type']].drop_duplicates().set_index('ticker').T.to_dict()
                    mongoData = mdb.getMultiIndex(mongoData.drop('type', axis=1), index=['ticker', 'timestamp'])
                    # save to mongoDB database in "tickers" library
                    mdb.save(mongoData, stockConfig.fx_daily, append=append, meta_data=meta_data)

                dataIgnore = pd.DataFrame(notAvailable, columns=['ticker']).drop_duplicates().reset_index(drop=True)
                if len(dataIgnore) > 0:
                    # add tickers that don't have data to a list we can import to ignore
                    writeDB(dBase=dBase, name=stockConfig.tickerExcludeTable, data=dataIgnore,
                            createArgs='("ticker", PRIMARY KEY("ticker"), UNIQUE("ticker"))',
                            args='("ticker")')

                    # set ticker data with multi index with name of table to save all data in one table
                    mongoData = copy(dataIgnore)
                    mongoData[stockConfig.tickers_exclude] = stockConfig.tickers_exclude
                    mongoData = mdb.getMultiIndex(mongoData, index=[stockConfig.tickers_exclude])
                    # save to mongoDB database in "tickers" library
                    mdb.save(mongoData, stockConfig.tickers_exclude, append=True)

                yahooDailyWrite = []
                notAvailable = []

                # don't need to clean all series every day, just once a week; during the week just insert
                if fullUpdate and clean:
                    dataDrop.timestamp = dataDrop.timestamp.astype('datetime64[D]')
                    dataDrop.adj_close = dataDrop.adj_close.astype(float)

                    # run cleaning algorithm and update yahooDailyClean
                    dataPivot = dataDrop.pivot_table(index='timestamp', columns='ticker', values='adj_close', aggfunc=sum)
                    dataPivot.index = dataPivot.index.values.astype('datetime64[D]')
                    # cleanData
                    dataClean = Analysis(data=dataPivot)
                    # must be less than today's date; we don't want NaNs filtering in for the latest date for
                    #   tickers that haven't published yet "today"
                    dataClean, _ = dataClean.cleanseTimeSeries(cutMissingDates=True, dates=dates, cores=4,
                                                               maxDate=maxDate)

                    if len(dataClean) == 0:
                        break

                    dataClean.reset_index(inplace=True)
                    dataClean = dataClean.rename(columns={'index': 'timestamp'})
                    dataClean = pd.melt(dataClean, id_vars=['timestamp'])
                    dataClean = dataClean.rename(columns={'value': 'adj_close'})
                    # merge back volume data
                    dataClean = pd.merge(dataClean, dataDrop[['timestamp', 'ticker', 'volume']], on=['ticker', 'timestamp'],
                                         how='left')

                    dataClean['ID'] = dataClean.timestamp.astype(str) + '_' + dataClean.ticker
                    dataClean = pd.merge(dataClean, tickers[['ticker', 'type']].drop_duplicates(),
                                         on=['ticker']).reset_index(drop=True)
                    dataClean.timestamp = dataClean.timestamp.astype(str)
                    dataClean = dataClean.replace({np.nan: 'N/A', np.inf: 'N/A', -np.inf: 'N/A', 'inf': 'N/A',
                                                   '-inf': 'N/A'}, regex=True)

                    dataClean = dataClean[dataClean.adj_close != 'N/A'].reset_index(drop=True)
                else:
                    dataClean = dataDrop[['timestamp', 'ticker', 'adj_close', 'volume', 'type', 'ID']]
                print('writing cleaned data...')
                # write clean data to its own table so can call from other functions
                writeDBv2(dBase=dBase, name=stockConfig.dailyTableClean, data=dataClean,
                          createArgs='("' + '","'.join(
                              dataClean.columns.tolist()) + '", PRIMARY KEY("ID"), UNIQUE("ID"))',
                          indexColName='"ticker"',
                          args='("' + '","'.join(dataClean.columns.tolist()) + '")')

                # set ticker data with multi index, ticker then date
                mongoData = copy(dataClean)
                mongoData = mongoData.drop(['ID'], axis=1)
                # set metaData as type
                meta_data = mongoData[['ticker', 'type']].drop_duplicates().set_index('ticker').T.to_dict()
                mongoData = mdb.getMultiIndex(mongoData.drop('type', axis=1), index=['ticker', 'timestamp'])
                # save to mongoDB database in "tickers" library
                mdb.save(mongoData, stockConfig.stock_daily_clean, append=append, meta_data=meta_data)

                # sleep so Yahoo doesn't get too angry
                time.sleep(60 * 3)
        print('stock data fetch complete')
Exemple #39
0
    return tweet


def process_user(user_id):
    global duplicates
    global user_counter
    for status in tweepy.Cursor(api.user_timeline, id=user_id).items():
        try:
            db[COLLECTION_NAME].insert_one(convert_date(status))
        except pymongo.errors.DuplicateKeyError:
            duplicates += 1


twitterStream = tweepy.Stream(auth=api.auth, listener=StreamListener())
twitterStream.sample(languages=["en"], is_async=True)

# Run for specified time
start_time = datetime.now()
end_time = start_time + timedelta(minutes=RUN_TIME)

while datetime.now() < end_time:
    time.sleep(10)

twitterStream.disconnect()

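# After the sampling window closes, pull the full timelines of the 10 most frequently
# seen users and store their tweets.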
for count in user_counter.most_common(10):
    print("PROCESSING USER + ", count)
    process_user(count[0])

print("Number of duplicates", duplicates)
Exemple #40
0
from pytz import timezone
from datetime import datetime, timedelta
d = datetime(2012, 12, 21, 9, 30, 0)
print(d)

central = timezone('US/Central')
loc_d = central.localize(d)
print(loc_d)

banf_d = loc_d.astimezone(timezone('Asia/Kolkata'))
print(banf_d)

d = datetime(2013, 3, 10, 1, 45)
loc_d = central.localize(d)
print(loc_d)
later = loc_d + timedelta(minutes=30)
print(later)

from datetime import timedelta
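# normalize() re-applies the correct UTC offset after arithmetic that crosses a DST
# transition (US/Central springs forward at 2:00 AM on 2013-03-10).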
later = central.normalize(loc_d + timedelta(minutes=30))
print(later)

import pytz
print(loc_d)
utc_d = loc_d.astimezone(pytz.utc)  # UTC (Coordinated Universal Time)
print(utc_d)

later_utc = utc_d + timedelta(minutes = 30)
print(later_utc.astimezone(central))

print(pytz.country_timezones['IN'])
Exemple #41
0
def izbraniPodatki():
    cursor=sql_cxn()[0]
    planiranje.cbUlica.clear()
    seznam = seznamUlic()
    danes = date.today()
    leto = timedelta(days=365)

    tabela = []
    vsota = 0
    for i in range(len(seznam)-1):
        obcina = seznam[i][0]
        naselje = seznam[i][1]
        ulica = seznam[i][2]
        
        obcina = obcina.replace(' ','%')
        naselje = naselje.replace(' ','%')
        ulica = ulica.replace(' ','%')
    
    
        queryPrikazi = """
        SELECT    *
        FROM 
            "eDimnikar".uporabnik
            LEFT  OUTER JOIN     
                (
                SELECT
                    "eDimnikar".fakture_vasco.sifra_kupca, 
                    "eDimnikar".fakture_vasco.znesek,
                    "eDimnikar".fakture_vasco.izvajalec,
                    "eDimnikar".zaposleni.priimek_ime,
                    "eDimnikar".fakture_vasco.datum_storitve,
                    rank()
                OVER 
                (
                    PARTITION BY sifra_kupca
                    ORDER BY datum_storitve
                    DESC
                )
                
                FROM "eDimnikar".fakture_vasco
                    LEFT JOIN     
                        "eDimnikar".zaposleni 
                    ON 
                        "eDimnikar".zaposleni.stara_sifra =  "eDimnikar".fakture_vasco.izvajalec  
                ) sub_query
            ON uporabnik.sifra_stara = sub_query.sifra_kupca
            LEFT JOIN     
                "eDimnikar".postne_stevilke
            ON 
                "eDimnikar".postne_stevilke.sifra_poste = "eDimnikar".uporabnik.posta_st_fk       
        WHERE
            "eDimnikar".uporabnik.rpe_obcina ILIKE '%{}%'
        AND    
            "eDimnikar".uporabnik.rpe_naselje ILIKE '%{}'
        AND 
            "eDimnikar".uporabnik.rpe_ulica ILIKE '%{}'
        AND
        (rank = 1 or rank IS NULL)
            ORDER BY "eDimnikar".uporabnik.naslov
                    ASC   ;""".format(obcina,naselje, ulica)
        
        cursor.execute(queryPrikazi)
        
        row = cursor.fetchall()
       
        hisnaOd=planiranje.planiranjeHsOd.text()
        hisnaDo=planiranje.planiranjeHsDo.text() 
        #print(hisnaDo, hisnaOd) 
        
        for i in row:
            if hisnaDo == '' and hisnaOd == '':
                tabela.append((str((i[1])),i[2],i[4],str(i[30]),str(i[31]),i[28],str(i[25]),i[26], i[17]))
                rowCount=len(tabela)
            
            elif int(i[22]) >= int(hisnaOd) and int(i[22]) <= int(hisnaDo):
                tabela.append((str((i[1])),i[2],i[4],str(i[2]),str(i[28]),i[28],str(i[25]),i[26],i[17]))
                rowCount=len(tabela)

    planiranje.twPlaniranje.setRowCount(rowCount)

    for i in range(len(tabela)):
        for j in range(len(tabela[i])):
            if (j==5):
                if tabela[i][5] !=None:
                    datum = tabela[i][5]

                    if (danes-datum) < leto:
                        print(danes-datum)
                        datumS=str(str(tabela[i][j]))
                        datumS=datumS[8:]+'.'+datumS[5:-3]+'.'+datumS[:4]
                        value = QtGui.QTableWidgetItem(datumS)
                        value1 = QtGui.QTableWidgetItem("ne naroči")
                        planiranje.twPlaniranje.setItem(i,9,value1)
                        planiranje.twPlaniranje.setItem(i,j,value)
                    else: 
                        datumS=str(str(tabela[i][j]))
                        datumS=datumS[8:]+'.'+datumS[5:-3]+'.'+datumS[:4]
                        value = QtGui.QTableWidgetItem(datumS)
                        #value = QtGui.QTableWidgetItem(str(tabela[i][j]))
                        planiranje.twPlaniranje.setItem(i,j,value)
            elif (j==6) and tabela[i][j] != 'None' :
                cena = float(tabela[i][6].strip(' "'))
                zDavkom = cena*1.22
                value3 = QtGui.QTableWidgetItem('{:.2f}'.format(zDavkom))
                planiranje.twPlaniranje.setItem(i,j,value3)
            elif (j==6) and tabela[i][j] == 'None':
                value3 = QtGui.QTableWidgetItem('')
                planiranje.twPlaniranje.setItem(i,j,value3)
                
            else:
                value = QtGui.QTableWidgetItem((tabela[i][j]))
                planiranje.twPlaniranje.setItem(i,j,value)
         

    planiranje.twPlaniranje.resizeColumnsToContents()
    
    for i in tabela:
        if i[6] != 'None':
            cena = Decimal(i[6].strip(' "'))
            vsota = vsota+cena
            
 
    planiranje.planiranjeLeReal.setText('{:.2f}'.format(vsota))
    return tabela
Exemple #42
0
    def printsol(self, sub_model=None):
        if self.model.status == GRB.OPTIMAL:
            for k in range(self.bus):
                print('Routing for bus %g' % (k + 1))
                table = BeautifulTable()
                table.column_headers = self.head

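                # str(timedelta(minutes=x)) renders as "H:MM:SS"; rsplit(':', 1)[0] drops the
                # seconds, e.g. timedelta(minutes=95) -> "1:35:00" -> "1:35".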
                for i, j in self.parameters.edges:
                    if self.variables.x[i, j, k].Xn > 0.5 and i not in [0, self.last] and j not in [0, self.last]:
                        self.row[i, j, k] = [i, j, str(timedelta(minutes=self.variables.t[i].Xn)).rsplit(':', 1)[0],
                                             str(timedelta(minutes=self.variables.t[j].Xn)).rsplit(':', 1)[0]]
                        table.append_row(self.row[i, j, k])
                    elif self.variables.x[i, j, k].Xn > 0.5 and i == 0:
                        self.row[i, j, k] = [i, j, str(timedelta(minutes=self.variables.td[i, k].Xn)).rsplit(':', 1)[0],
                                             str(timedelta(minutes=self.variables.t[j].Xn)).rsplit(':', 1)[0]]
                        table.append_row(self.row[i, j, k])
                    elif self.variables.x[i, j, k].Xn > 0.5 and j == self.last:
                        self.row[i, j, k] = [i, j, str(timedelta(minutes=self.variables.t[i].Xn)).rsplit(':', 1)[0],
                                             str(timedelta(minutes=self.variables.td[j, k].Xn)).rsplit(':', 1)[0]]
                        table.append_row(self.row[i, j, k])
                print(table)
            if sub_model is not None:
                for s in sub_model.parameters.xi:
                    for k in range(sub_model.bus):
                        print('Scenario %g Routing for bus %g' % (s + 1, k + 1))
                        table = BeautifulTable()
                        table.column_headers = self.head

                        for i, j in sub_model.parameters.edges:
                            if sub_model.variables.xs[i, j, k, s].Xn > 0.5 and i not in [0, sub_model.last] \
                                    and j not in [0, sub_model.last]:
                                self.row[i, j, k, s] = [i, j,
                                                        str(timedelta(minutes=sub_model.variables.ts[i, s].Xn)).rsplit(
                                                            ':', 1)[0],
                                                        str(timedelta(minutes=sub_model.variables.ts[j, s].Xn)).rsplit(
                                                            ':', 1)[0]]
                                table.append_row(self.row[i, j, k, s])
                            elif sub_model.variables.xs[i, j, k, s].Xn > 0.5 and i == 0:
                                if j != self.last:
                                    self.row[i, j, k, s] = [i, j,
                                                            str(timedelta(
                                                                minutes=sub_model.variables.tds[i, k, s].Xn)).rsplit(
                                                                ':', 1)[
                                                                0],
                                                            str(timedelta(
                                                                minutes=sub_model.variables.ts[j, s].Xn)).rsplit(':',
                                                                                                                 1)[0]]
                                else:
                                    self.row[i, j, k, s] = [i, j,
                                                            str(timedelta(
                                                                minutes=sub_model.variables.tds[i, k, s].Xn)).rsplit(
                                                                ':', 1)[
                                                                0],
                                                            str(timedelta(
                                                                minutes=sub_model.variables.tds[j, k, s].Xn)).rsplit(
                                                                ':',
                                                                1)[0]]
                                table.append_row(self.row[i, j, k, s])
                            elif sub_model.variables.xs[i, j, k, s].Xn > 0.5 and j == sub_model.last:
                                if i != 0:
                                    self.row[i, j, k, s] = [i, j,
                                                            str(timedelta(
                                                                minutes=sub_model.variables.ts[i, s].Xn)).rsplit(':',
                                                                                                                 1)[0],
                                                            str(timedelta(
                                                                minutes=sub_model.variables.tds[j, k, s].Xn)).rsplit(
                                                                ':', 1)[
                                                                0]]
                                else:
                                    self.row[i, j, k, s] = [i, j,
                                                            str(timedelta(
                                                                minutes=sub_model.variables.tds[i, k, s].Xn)).rsplit(
                                                                ':',
                                                                1)[0],
                                                            str(timedelta(
                                                                minutes=sub_model.variables.tds[j, k, s].Xn)).rsplit(
                                                                ':', 1)[
                                                                0]]
                                table.append_row(self.row[i, j, k, s])
                        print(table)
Exemple #43
0
def sync(client, config, catalog, state):
    if 'start_date' in config:
        start_date = config['start_date']
    # LOGGER.info('start_date = {}'.format(start_date))

    # Get datetimes for endpoint parameters
    communications_dttm_str = get_bookmark(state, 'communications', 'self',
                                           start_date)
    communications_dt_str = transform_datetime(communications_dttm_str)
    # LOGGER.info('communications bookmark_date = {}'.format(communications_dt_str))

    deposit_transactions_dttm_str = get_bookmark(state, 'deposit_transactions',
                                                 'self', start_date)
    deposit_transactions_dt_str = transform_datetime(
        deposit_transactions_dttm_str)
    # LOGGER.info('deposit_transactions bookmark_date = {}'.format(deposit_transactions_dt_str))

    loan_transactions_dttm_str = get_bookmark(state, 'loan_transactions',
                                              'self', start_date)
    loan_transactions_dt_str = transform_datetime(loan_transactions_dttm_str)
    loan_transactions_dttm = strptime_to_utc(loan_transactions_dt_str)

    clients_dttm_str = get_bookmark(state, 'clients', 'self', start_date)
    clients_dt_str = transform_datetime(clients_dttm_str)

    groups_dttm_str = get_bookmark(state, 'groups', 'self', start_date)
    groups_dt_str = transform_datetime(groups_dttm_str)

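    # Loan transactions are always re-synced from at least lookback_window days ago,
    # even if the saved bookmark is more recent.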
    lookback_days = int(config.get('lookback_window', LOOKBACK_DEFAULT))
    lookback_date = utils.now() - timedelta(lookback_days)
    if loan_transactions_dttm > lookback_date:
        loan_transactions_dt_str = transform_datetime(strftime(lookback_date))
    # LOGGER.info('loan_transactions bookmark_date = {}'.format(loan_transactions_dt_str))

    # endpoints: API URL endpoints to be called
    # properties:
    #   <root node>: Plural stream name for the endpoint
    #   path: API endpoint relative path, when added to the base URL, creates the full path
    #   api_version: v1 or v2 (default v2).
    #   api_method: GET or POST (default GET).
    #   params: Query, sort, and other endpoint specific parameters
    #   data_key: JSON element containing the records for the endpoint
    #   bookmark_query_field: Typically a date-time field used for filtering the query
    #   bookmark_field: Replication key field, typically a date-time, used for filtering the results
    #        and setting the state
    #   bookmark_type: Data type for bookmark, integer or datetime
    #   id_fields: Primary key (and other IDs) from the Parent stored when store_ids is true.
    #   children: A collection of child endpoints (where the endpoint path includes the parent id)
    #   parent: On each of the children, the singular stream name for parent element
    #   Details Level: https://api.mambu.com/?http#detail-level, FULL includes custom fields

    endpoints = {
        'branches': {
            'path': 'branches',
            'api_version': 'v2',
            'api_method': 'GET',
            'params': {
                'sortBy': 'lastModifiedDate:ASC',
                'detailsLevel': 'FULL',
                'paginationDetails': 'ON'
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'communications': {
            'path':
            'communications/messages:search',
            'api_version':
            'v2',
            'api_method':
            'POST',
            'params': {
                'detailsLevel': 'FULL'
            },
            'body': [{
                'field': 'state',
                'operator': 'EQUALS',
                'value': 'SENT'
            }, {
                'field': 'creationDate',
                'operator': 'AFTER',
                'value': communications_dt_str
            }],
            'bookmark_field':
            'creation_date',
            'bookmark_type':
            'datetime',
            'id_fields': ['encoded_key']
        },
        'centres': {
            'path': 'centres',
            'api_version': 'v2',
            'api_method': 'GET',
            'params': {
                'sortBy': 'lastModifiedDate:ASC',
                'detailsLevel': 'FULL',
                'paginationDetails': 'ON'
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'clients': {
            'path': 'clients:search',
            'api_version': 'v2',
            'api_method': 'POST',
            'params': {
                'detailsLevel': 'FULL'
            },
            'body': {
                "sortingCriteria": {
                    "field": "lastModifiedDate",
                    "order": "ASC"
                },
                "filterCriteria": [{
                    "field": "lastModifiedDate",
                    "operator": "AFTER",
                    "value": clients_dt_str
                }]
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'credit_arrangements': {
            'path': 'creditarrangements',
            'api_version': 'v2',
            'api_method': 'GET',
            'params': {
                'sortBy': 'creationDate:ASC',
                'detailsLevel': 'FULL',
                'paginationDetails': 'ON'
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'custom_field_sets': {
            'path': 'customfieldsets',
            'api_version': 'v1',
            'api_method': 'GET',
            'params': {},
            'id_fields': ['id']
        },
        'deposit_accounts': {
            'path': 'deposits',
            'api_version': 'v2',
            'api_method': 'GET',
            'params': {
                'sortBy': 'lastModifiedDate:ASC',
                'detailsLevel': 'FULL'
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id'],
            'store_ids': True,
            'children': {
                'cards': {
                    'path': 'deposits/{}/cards',
                    'api_version': 'v2',
                    'api_method': 'GET',
                    'params': {
                        'detailsLevel': 'FULL'
                    },
                    'id_fields': ['deposit_id', 'reference_token'],
                    'parent': 'deposit'
                }
            }
        },
        'deposit_products': {
            'path': 'savingsproducts',
            'api_version': 'v1',
            'api_method': 'GET',
            'params': {
                "fullDetails": True
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'deposit_transactions': {
            'path': 'deposits/transactions:search',
            'api_version': 'v2',
            'api_method': 'POST',
            'params': {
                'detailsLevel': 'FULL'
            },
            'body': {
                "sortingCriteria": {
                    "field": "creationDate",
                    "order": "ASC"
                },
                "filterCriteria": [{
                    "field": "creationDate",
                    "operator": "AFTER",
                    "value": deposit_transactions_dt_str
                }]
            },
            'bookmark_field': 'creation_date',
            'bookmark_type': 'datetime',
            'id_fields': ['encoded_key']
        },
        'groups': {
            'path': 'groups:search',
            'api_version': 'v2',
            'api_method': 'POST',
            'params': {
                'detailsLevel': 'FULL'
            },
            'body': {
                "sortingCriteria": {
                    "field": "lastModifiedDate",
                    "order": "ASC"
                },
                "filterCriteria": [{
                    "field": "lastModifiedDate",
                    "operator": "AFTER",
                    "value": groups_dt_str
                }]
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'loan_accounts': {
            'path': 'loans',
            'api_version': 'v2',
            'api_method': 'GET',
            'params': {
                'sortBy': 'lastModifiedDate:ASC',
                'detailsLevel': 'FULL',
                'paginationDetails': 'ON'
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id'],
            'children': {
                'loan_repayments': {
                    'path': 'loans/{}/repayments',
                    'api_version': 'v1',
                    'api_method': 'GET',
                    'params': {
                        'detailsLevel': 'FULL',
                        'paginationDetails': 'ON'
                    },
                    'id_fields': ['encoded_key'],
                    'parent': 'loan_accounts'
                }
            }
        },
        'loan_products': {
            'path': 'loanproducts',
            'api_version': 'v1',
            'api_method': 'GET',
            'params': {
                "fullDetails": True
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'loan_transactions': {
            'path': 'loans/transactions:search',
            'api_version': 'v2',
            'api_method': 'POST',
            'params': {
                'detailsLevel': 'FULL'
            },
            'body': {
                "sortingCriteria": {
                    "field": "creationDate",
                    "order": "ASC"
                },
                "filterCriteria": [{
                    "field": "creationDate",
                    "operator": "AFTER",
                    "value": loan_transactions_dt_str
                }]
            },
            'bookmark_field': 'creation_date',
            'bookmark_type': 'datetime',
            'id_fields': ['encoded_key']
        },
        'tasks': {
            'path': 'tasks',
            'api_version': 'v2',
            'api_method': 'GET',
            'params': {
                'sortBy': 'lastModifiedDate:ASC',
                'detailsLevel': 'FULL',
                'paginationDetails': 'ON'
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'users': {
            'path': 'users',
            'api_version': 'v2',
            'api_method': 'GET',
            'params': {
                'sortBy': 'lastModifiedDate:ASC',
                'detailsLevel': 'FULL',
                'paginationDetails': 'ON'
            },
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'id_fields': ['id']
        },
        'gl_accounts': {
            'path': 'glaccounts',
            'api_version': 'v1',
            'api_method': 'GET',
            'params': {
                'type': '{sub_type}'
            },
            'id_fields': ['gl_code'],
            'bookmark_field': 'last_modified_date',
            'bookmark_type': 'datetime',
            'sub_types': ['ASSET', 'LIABILITY', 'EQUITY', 'INCOME', 'EXPENSE']
        },
        'gl_journal_entries': {
            'path': 'gljournalentries/search',
            'api_version': 'v1',
            'api_method': 'POST',
            'body': {
                "filterConstraints": [{
                    "filterSelection": "CREATION_DATE",
                    "filterElement": "BETWEEN",
                    "value": '{gl_journal_entries_from_dt_str}',
                    "secondValue": "{now_date_str}"
                }]
            },
            'id_fields': ['entry_id'],
            'bookmark_field': 'booking_date',
            'bookmark_type': 'datetime'
        },
        'activities': {
            'path': 'activities',
            'api_version': 'v1',
            'api_method': 'GET',
            'params': {
                'from': '{activities_from_dt_str}',
                'to': '{now_date_str}'
            },
            'id_fields': ['encoded_key'],
            'bookmark_field': 'timestamp',
            'bookmark_type': 'datetime'
        },
        'index_rate_sources': {
            'path': 'indexratesources',
            'api_version': 'v2',
            'api_method': 'GET',
            'id_fields': ['encoded_key'],
            'params': {}
        },
        'installments': {
            'path': 'installments',
            'api_version': 'v2',
            'api_method': 'GET',
            'id_fields': ['encoded_key'],
            'params': {
                'dueFrom': '{installments_from_dt_str}',
                'dueTo': '{now_date_str}'
            },
            'bookmark_field': 'last_paid_date',
            'bookmark_type': 'datetime'
        }
    }

    selected_streams = get_selected_streams(catalog)
    LOGGER.info('selected_streams: {}'.format(selected_streams))

    if not selected_streams:
        return

    # last_stream = Previous currently synced stream, if the load was interrupted
    last_stream = singer.get_currently_syncing(state)
    LOGGER.info('last/currently syncing stream: {}'.format(last_stream))

    # Start syncing from last/currently syncing stream
    if last_stream in selected_streams:
        selected_streams = selected_streams[selected_streams.index(
            last_stream):] + selected_streams[:selected_streams.
                                              index(last_stream)]

    # For each endpoint (above), determine if the stream should be streamed
    #   (based on the catalog and last_stream), then sync those streams.
    for stream_name in selected_streams:
        endpoint_config = endpoints.get(stream_name)
        if endpoint_config is None:
            # null endpoint_config signifies that this is a child stream
            continue
        should_stream, last_stream = should_sync_stream(
            selected_streams, last_stream, stream_name)

        if should_stream:
            # loop through each sub type
            sub_types = endpoint_config.get('sub_types', ['self'])
            for sub_type in sub_types:
                LOGGER.info('START Syncing: {}, Type: {}'.format(
                    stream_name, sub_type))

                # Now date
                if stream_name == 'gl_journal_entries':
                    now_date_str = strftime(utils.now())[:10]
                    gl_journal_entries_from_dttm_str = get_bookmark(
                        state, 'gl_journal_entries', sub_type, start_date)
                    gl_journal_entries_from_dt_str = transform_datetime(
                        gl_journal_entries_from_dttm_str)[:10]
                    gl_journal_entries_from_param = endpoint_config.get(
                        'body', {}).get('filterConstraints',
                                        {})[0].get('value')
                    if gl_journal_entries_from_param:
                        endpoint_config['body']['filterConstraints'][0][
                            'value'] = gl_journal_entries_from_dt_str
                    gl_journal_entries_to_param = endpoint_config.get(
                        'body', {}).get('filterConstraints',
                                        {})[0].get('secondValue')
                    if gl_journal_entries_to_param:
                        endpoint_config['body']['filterConstraints'][0][
                            'secondValue'] = now_date_str

                if stream_name == 'activities':
                    now_date_str = strftime(utils.now())[:10]
                    activities_from_dttm_str = get_bookmark(
                        state, 'activities', sub_type, start_date)
                    activities_from_dt_str = transform_datetime(
                        activities_from_dttm_str)[:10]
                    activities_from_param = endpoint_config.get('params',
                                                                {}).get('from')
                    if activities_from_param:
                        endpoint_config['params'][
                            'from'] = activities_from_dt_str
                    activities_to_param = endpoint_config.get('params',
                                                              {}).get('to')
                    if activities_to_param:
                        endpoint_config['params']['to'] = now_date_str

                if stream_name == 'installments':
                    now_date_str = strftime(utils.now())[:10]
                    installments_from_dttm_str = get_bookmark(
                        state, 'installments', sub_type, start_date)
                    installments_from_dt_str = transform_datetime(
                        installments_from_dttm_str)[:10]
                    installments_from_param = endpoint_config.get(
                        'params', {}).get('dueFrom')
                    if installments_from_param:
                        endpoint_config['params'][
                            'dueFrom'] = installments_from_dt_str
                    installments_to_param = endpoint_config.get(
                        'params', {}).get('dueTo')
                    if installments_to_param:
                        endpoint_config['params']['dueTo'] = now_date_str

                update_currently_syncing(state, stream_name)
                path = endpoint_config.get('path')
                sub_type_param = endpoint_config.get('params', {}).get('type')
                if sub_type_param:
                    endpoint_config['params']['type'] = sub_type

                total_records = sync_endpoint(
                    client=client,
                    catalog=catalog,
                    state=state,
                    start_date=start_date,
                    stream_name=stream_name,
                    path=path,
                    endpoint_config=endpoint_config,
                    api_version=endpoint_config.get('api_version', 'v2'),
                    api_method=endpoint_config.get('api_method', 'GET'),
                    static_params=endpoint_config.get('params', {}),
                    sub_type=sub_type,
                    bookmark_query_field=endpoint_config.get(
                        'bookmark_query_field'),
                    bookmark_field=endpoint_config.get('bookmark_field'),
                    bookmark_type=endpoint_config.get('bookmark_type'),
                    data_key=endpoint_config.get('data_key', None),
                    body=endpoint_config.get('body', None),
                    id_fields=endpoint_config.get('id_fields'))

                update_currently_syncing(state, None)
                LOGGER.info('Synced: {}, total_records: {}'.format(
                    stream_name, total_records))
                LOGGER.info('FINISHED Syncing: {}'.format(stream_name))
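# A standalone sketch of the resume ordering used above: the selected-stream list is
# rotated so that a sync interrupted mid-run restarts at the stream recorded in state
# and then wraps around to the remaining streams. The stream names below mirror the
# endpoint config above; the call itself is illustrative only.
def rotate_to_resume(selected_streams, last_stream):
    """Reorder streams so syncing resumes at last_stream and wraps around."""
    if last_stream in selected_streams:
        idx = selected_streams.index(last_stream)
        return selected_streams[idx:] + selected_streams[:idx]
    return selected_streams

print(rotate_to_resume(
    ['gl_journal_entries', 'activities', 'index_rate_sources', 'installments'],
    'index_rate_sources'))
# ['index_rate_sources', 'installments', 'gl_journal_entries', 'activities']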
Exemple #44
0
import urllib.request
import pandas as pd
from datetime import timedelta

# Format data
DailyPrices_Data = pd.read_csv("DailyPrices.csv", infer_datetime_format=True)
DailyPrices_Data.columns = ['Date', 'Price (Dollars per Million Btu)']
DailyPrices_Data = DailyPrices_Data.drop([0,1], axis=0)
DailyPrices_Data['Date'] = pd.to_datetime(DailyPrices_Data['Date'])
DailyPrices_Data = DailyPrices_Data.reset_index(drop=True)

# Save to CSV
DailyPrices_Data.to_csv('Data/DailyPrices.csv')


# Monthly Prices CSV
# Download data from site
MonthPrice_URL = 'http://www.eia.gov/dnav/ng/hist_xls/RNGWHHDm.xls'
urllib.request.urlretrieve(MonthPrice_URL, "MonthlyPrices.xls")

# Convert xls to csv
MonthPrice_XLS = pd.read_excel('MonthlyPrices.xls', 'Data 1')
MonthPrice_XLS.to_csv('MonthlyPrices.csv', index=None, header=True)

# Format Data
MonthlyPrices_Data = pd.read_csv("MonthlyPrices.csv", infer_datetime_format=True)
MonthlyPrices_Data.columns = ['Date', 'Price (Dollars per Million Btu)']
MonthlyPrices_Data = MonthlyPrices_Data.drop([0,1], axis=0)
MonthlyPrices_Data = MonthlyPrices_Data.reset_index(drop=True)
MonthlyPrices_Data['Date'] = (pd.to_datetime(MonthlyPrices_Data['Date']) + timedelta(-14))

# Save to CSV
MonthlyPrices_Data.to_csv('Data/MonthlyPrices.csv')

Exemple #45
0
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from resources.item import Item, ItemList
from resources.store import Store, StoreList
from security import authenticate, identity  # assumed helper module providing the JWT handlers
from datetime import timedelta
from flask.json import jsonify
import os

app = Flask(__name__)
app.secret_key = 'email'
api = Api(app)

app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',
                                                       'sqlite:///data.db')
app.config[
    'SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # turn off Flask-SQLAlchemy's own modification tracking; SQLAlchemy's event system is used instead
app.config['JWT_AUTH_URL_RULE'] = '/Login'  #change the default /auth to /Login
app.config['JWT_EXPIRATION_DELTA'] = timedelta(
    seconds=1800)  #default is 5 mins
# app.config['JWT_AUTH_USERNAME_KEY']='email'  # config JWT auth key name to be 'email' instead of default 'username'

jwt = JWT(app, authenticate,
          identity)  #JWT create an endpoint /auth to return the jwt token


# customize JWT auth response, include user_id in response body
@jwt.auth_response_handler
def customized_response_handler(access_token, identity):
    return jsonify({
        'access_token': access_token.decode('utf-8'),
        'user_id': identity.id
    })
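# With the customized response handler above, a successful POST to /Login returns the
# access token together with the user's id instead of Flask-JWT's default token-only
# body. A minimal client-side sketch; the host, credentials and the token value in the
# expected output are placeholders, not values from this project.
import requests

resp = requests.post('http://localhost:5000/Login',
                     json={'username': 'user@example.com', 'password': 'secret'})
print(resp.json())
# e.g. {'access_token': 'eyJhbGciOiJIUzI1...', 'user_id': 1}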

def parse(machine = "61282649"):

    #########################################################################
    ## output file

##    ## years, months and days to read from the files. Missing ones are skipped.
##    config = ([2015],
##               {2015:[11],2016:[1]},
##               {10:31, 11:27, 12:31, 1:31})
    #########################################################################

    product_plans = parseXmlFiles(plan_path)
    
    print(str(product_plans))
    
    scrap_reports = None
    scrap_fname = dir_name + 'scrap_reports-fixdates-rates.p'
    if os.path.isfile(scrap_fname):
        with open(scrap_fname, 'rb') as pin:
            scrap_reports = pickle.load(pin)
    else:
        scrap_reports = parseCsvFiles(showMissing=True, path=scrap_path)
        with open(scrap_fname, 'wb') as pout:
            pickle.dump(scrap_reports, pout)
        
    scrap_types = get_scrap_types()
    
    header_offset = 5
    row_offset = header_offset + 3
    col_offset = 2

    ## shifts: II: 6-14h, III: 14-22h, I: 22-6h
    ## (this is the only interpretation that makes sense in the work plan)

    header_row = None
    result = []

    files = listdir(readings_path)
    for file in files:
        
        if not file.lower().endswith('csv'):
            continue
        
        print(file)
        
        fin = open(readings_path + file, 'r')
        reader = csv.reader(fin, delimiter=',', quotechar='"')
        rows = [row for row in reader]
        
        n_rows = len(rows)
        n_cols = len(rows[0])
        
        from_to_cell = rows[3][0]
        from_to_str = from_to_cell
        from_to_spl = from_to_str.split(' - ')
        
        start_time = datetime.datetime.strptime(from_to_spl[0], "%d.%m.%y %H:%M")
        end_time = datetime.datetime.strptime(from_to_spl[1], "%d.%m.%y %H:%M")
        
        print('Parsing readings from ' + str(start_time) + ' to ' + str(end_time))
        
        if header_row is None:
                
            header_row = ['date', 'product', 'shift', 'timestamp', 'total parts', 'total scrap'] + scrap_types
            header_row.append('total scrap (%)')
            for scrap_type in scrap_types:
                header_row.append(scrap_type + ' (%)')
                    
            for col_n in range(col_offset, n_cols):
                h0 = rows[header_offset][col_n]
                h1 = rows[header_offset+1][col_n]
                h2 = rows[header_offset+2][col_n]
                
                if h0 is None:
                    h0 = ''
                if h1 is None:
                    h1 = ''
                if h2 is None:
                    break
                    
                header_row.append(h0 + h1 + h2)
        
        for row_n in range(row_offset, n_rows):
            out_row = []
            
            date_str = rows[row_n][0]
            time_str = rows[row_n][1]
            
            if date_str is None:
                break
            
            # 'Nastavljena vrednost' ("set value") marks a setpoint row in the readings CSV; skip it
            if date_str == 'Nastavljena vrednost':
                continue
            
            reading_time = datetime.datetime.strptime(date_str + ' ' + time_str, "%d.%m.%y %H:%M:%S")
            hour = reading_time.hour
            timestamp = get_timestamp(reading_time)
            
            plan_date_str = None
            if hour < 6:
                plan_date_str = (reading_time - timedelta(days=1)).strftime('%d.%m.%Y')
            else:
                plan_date_str = reading_time.strftime('%d.%m.%Y')
            
            shift_n = get_shift(reading_time)
            
            if not plan_date_str in product_plans:
                print('Don\'t have a plan for date: ' + plan_date_str)
                continue
            
            product_plan = product_plans[plan_date_str]   
            
            # get the correct product
            
            possible_products = product_plan[shift_n]
            product = None
            for product_conf in possible_products:
                product_end_hour = product_conf['end']
                
                if shift_n == 3:
                    if hour <= 23:
                        if hour <= product_end_hour or product_end_hour <= 6:
                            product = product_conf['product']
                            break
                    elif hour < product_end_hour:
                        product = product_conf['product']
                        break
                elif hour < product_end_hour:
                    product = product_conf['product']
                    break
            
            if product is None:
                raise ValueError('Unable to find product! Date: ' + plan_date_str)

            product = product.strip()

            if product == 'prazno':    # 'prazno' means the slot was empty; nothing was planned
                continue    # TODO can I do anything better here???
            if not product in plan_to_scrap_h:
                raise ValueError('Product ' + product + ' not in hash! Date: ' + plan_date_str)
            
            if not plan_date_str in scrap_reports:
                print('Scrap report for date ' + plan_date_str + ' missing!')
                continue
            
            # extract the scrap report
            scrap_report = scrap_reports[plan_date_str]
            product_scrap = plan_to_scrap_h[product]
            
            if product_scrap is None:
                raise ValueError('Product ' + product + ' not found!')
            
            if isinstance(product_scrap, list):
                for ps in product_scrap:
                    if ps in scrap_report[shift_n]:
                        product_scrap = ps
                        break
            
            out_row.append(date_str + ' ' + time_str)
            out_row.append(product)
            out_row.append(shift_n)
            out_row.append(timestamp)
            
            if not product_scrap in scrap_report[shift_n]:
                print('Scrap report missing for car: ' + product_scrap + '! Date: ' + plan_date_str)
                continue
            
            scraps = scrap_report[shift_n][product_scrap]
            good_parts = scraps['good_parts']
            total_scrap = 0
            for scrap_type in scrap_types:
                total_scrap += scraps[scrap_type]
                
            total_parts = good_parts + total_scrap
            
            out_row.append(total_parts)
            out_row.append(total_scrap)
            for scrap_type in scrap_types:
                out_row.append(scraps[scrap_type])
            
            out_row.append(float(total_scrap) / total_parts if total_parts != 0 else 0)
            for scrap_type in scrap_types:
                out_row.append(float(scraps[scrap_type]) / total_parts if total_parts != 0 else 0)
            
            for col_n in range(col_offset, n_cols):
                value = rows[row_n][col_n]
                
                if value is None or value == '':
                    value = 0
                else:
                    value = float(value)
                
                out_row.append(value)

            result.append(out_row)
        fin.close()
        
    print('Removing duplicates ...')
    timestamp_col = 3
    timestamp_h = { row[timestamp_col]: row for row in result }
    result = [timestamp_h[key] for key in timestamp_h]
    print('Sorting ...')
    result.sort(key = lambda row: row[timestamp_col])
        
    print('Writing to output file ...')
    fout = open(fout_name, 'w')
    fout.write(','.join(['"' + str(val) + '"' for val in header_row]))
    for row_n, out_row in enumerate(result):
        line = ','.join([str(val) for val in out_row])
        fout.write('\n' + line)
            
    fout.flush()
    fout.close()
    
    print('Done!')
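# One detail of parse() worth isolating: readings taken before 06:00 are attributed to
# the previous calendar day's production plan, because the night shift starts the
# evening before. A standalone illustration of that date attribution (dates are
# illustrative):
from datetime import datetime, timedelta

def plan_date_for(reading_time):
    """Readings before 06:00 belong to the previous day's production plan."""
    if reading_time.hour < 6:
        return (reading_time - timedelta(days=1)).strftime('%d.%m.%Y')
    return reading_time.strftime('%d.%m.%Y')

print(plan_date_for(datetime(2016, 1, 15, 2, 30)))   # 14.01.2016
print(plan_date_for(datetime(2016, 1, 15, 10, 0)))   # 15.01.2016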
Exemple #47
0
import sys
import os
import glob
import numpy as np
from datetime import datetime, timedelta

#NX = 27
#NY = 29
NX = 36
NY = 37

dir_input = "/dados/radar/saoroque/ppi/level1_tamanduatei/2015/01"
dir_output = "/dados/radar/saoroque/ppi/level1_tamanduatei_txt/2015/01"
start = datetime.strptime("201501312310", "%Y%m%d%H%M")
end = datetime.strptime("201501312350", "%Y%m%d%H%M")

datehour = start
while datehour <= end:
    pattern1 = datetime.strftime(datehour, "*%Y%m%d_%H*.dat")
    files = glob.glob(os.path.join(dir_input, pattern1))
    nfiles = len(files)
    for file in sorted(files):
        filename = os.path.basename(file)
        data = np.fromfile(file.strip(), dtype=np.float32).reshape((NY, NX), order='C')
        np.place(data, data==255, -99)
        #np.place(data, data<0, 0.0)

        txt_file = os.path.join(dir_output, filename.replace(".dat", ".txt"))
        np.savetxt(txt_file, data, fmt='%03d')

    #datehour = datehour + timedelta(hours=1)
    datehour = datehour + timedelta(minutes=10)
    def get_by_trimesters(self, year, trimester):
        min_date = datetime(year, min(trimester), 1)
        # Upper bound is the last day of the trimester's final month; roll over into
        # January of the following year when the trimester ends in December.
        if max(trimester) == 12:
            max_date = datetime(year + 1, 1, 1) - timedelta(days=1)
        else:
            max_date = datetime(year, max(trimester) + 1, 1) - timedelta(days=1)
        return self.filter(date__gte=min_date, date__lte=max_date).order_by('-date')
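# A standalone check of the trimester boundary computation used above (no Django
# required); trimester is a collection of month numbers, e.g. (10, 11, 12) for the
# fourth trimester:
from datetime import datetime, timedelta

def trimester_bounds(year, trimester):
    """Return the first and last day of the trimester given as a tuple of months."""
    min_date = datetime(year, min(trimester), 1)
    if max(trimester) == 12:
        max_date = datetime(year + 1, 1, 1) - timedelta(days=1)
    else:
        max_date = datetime(year, max(trimester) + 1, 1) - timedelta(days=1)
    return min_date, max_date

print(trimester_bounds(2020, (10, 11, 12)))
# (datetime.datetime(2020, 10, 1, 0, 0), datetime.datetime(2020, 12, 31, 0, 0))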
    def build_waves_by_new_cases(self, country: CovidCountry):

        waves = []
        wave_start_date = None
        wave_start_count = 0
        wave_end_date = None
        wave_end_count = 0
        previous_day = None
        # plt.figure(figsize=(16,9))
        for i, day in self.data.iterrows():
            if (i != 0) and pd.notnull(day["rolling_new_cases"]):
                # since we advance one day at a time, the denominator (the run) is constant and equal to 1
                angle = np.rad2deg(
                    np.arctan2(
                        day["rolling_new_cases"] -
                        previous_day["rolling_new_cases"], 1))
                #color = None

                if angle < self.DECREASING_ANGLE:
                    if wave_start_date and wave_end_date is None and wave_end_count == 0:
                        wave_end_date = day["date"]
                    if wave_start_date and wave_end_date and wave_end_count < self.DECREASING_FOR:
                        wave_end_count += 1
                    if wave_start_count < self.INCREASING_FOR:
                        wave_start_count = 0
                        wave_start_date = None
                elif angle > self.INCREASING_ANGLE:
                    if wave_start_date is None and wave_start_count == 0:
                        wave_start_date = day["date"]
                    if wave_start_date and wave_start_count < self.INCREASING_FOR:
                        wave_start_count += 1
                    if wave_end_count < self.DECREASING_FOR:
                        wave_end_count = 0
                        wave_end_date = None

                if (wave_start_count == self.INCREASING_FOR
                        and wave_end_count == self.DECREASING_FOR):
                    mask_wave = ((self.data['date'] > wave_start_date) &
                                 (self.data['date'] <= wave_end_date))
                    mean_case_count_during_wave = self.data[mask_wave][
                        "rolling_new_cases"].mean()
                    if mean_case_count_during_wave > self.MINIMUM_MEAN_NEW_CASES_FOR_WAVE:
                        mask_wave_extended = (
                            (self.data['date'] > wave_start_date -
                             timedelta(days=self.WAVE_START_OFFSET)) &
                            (self.data['date'] <= wave_end_date +
                             timedelta(days=self.WAVE_END_OFFSET)))
                        waves.append(self.data[
                            mask_wave_extended
                            & self.data['rolling_new_cases'].notna()])
                    wave_start_count = 0
                    wave_start_date = None
                    wave_end_count = 0
                    wave_end_date = None

            if (len(self.data.index) == i + 1
                    and wave_start_count == self.INCREASING_FOR
                    and wave_end_count < self.DECREASING_FOR):
                wave_end_date = day["date"]
                mask_wave = ((self.data['date'] > wave_start_date) &
                             (self.data['date'] <= wave_end_date))
                mean_case_count_during_wave = self.data[mask_wave][
                    "rolling_new_cases"].mean()
                if mean_case_count_during_wave > self.MINIMUM_MEAN_NEW_CASES_FOR_WAVE:
                    mask_wave_extended = (
                        (self.data['date'] > wave_start_date -
                         timedelta(days=self.WAVE_START_OFFSET)) &
                        (self.data['date'] <=
                         wave_end_date + timedelta(days=self.WAVE_END_OFFSET)))
                    waves.append(
                        self.data[mask_wave_extended
                                  & self.data['rolling_new_cases'].notna()])

            previous_day = day
        merged_waves = Wave.merge(waves)
        filtered_waves = []

        for wave in merged_waves:
            mean_case_count_during_wave = wave["rolling_new_cases"].mean()
            if mean_case_count_during_wave > self.MINIMUM_MEAN_NEW_CASES_FOR_WAVE:
                filtered_waves.append(
                    Wave.build_wave_from_dataframe(wave, country,
                                                   'rolling_new_cases'))

        self.waves_cases = filtered_waves

        # plt.title(country_iso)

        # plt.savefig(FIGURES_PATH + country_iso + ".png")
        # plt.close()
        return filtered_waves
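# A standalone illustration of the slope-to-angle conversion used in the wave detection
# above: since the loop advances one day at a time, the run passed to arctan2 is fixed
# at 1, so the angle depends only on the day-over-day change in the rolling average.
# The delta values below are illustrative.
import numpy as np

for delta in [0, 5, 50, -20]:
    angle = np.rad2deg(np.arctan2(delta, 1))
    print('delta = {:>4}  angle = {:7.2f} degrees'.format(delta, angle))
# delta =    0  angle =    0.00 degrees
# delta =    5  angle =   78.69 degrees
# delta =   50  angle =   88.85 degrees
# delta =  -20  angle =  -87.14 degrees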
    def downloadYahoo(ticker, intraRange=None, dailyDateRange=None, symbols=None, yahooIntra=stockConfig.yahooIntra,
                      yahooDaily=stockConfig.yahooDaily, yahooFundementals=stockConfig.yahooFundementals, track=0,
                      urlType=2, adjust=False):
        dataOut = []
        testFunc = None
        typ = 'get'

        if symbols is not None:
            values = ['timeStamp'] + symbols.Description.tolist()
            symbolStr = ''.join(symbols.Symbol)
            url = yahooFundementals + 's=' + ticker + '&f=' + symbolStr
            testFunc = 'any((lambda output=output : [any(pd.Series(output).str.contains(x)) for x in ' + str(
                re.split(r'\+', ticker)).replace('^', '') + '])())'
        elif intraRange is not None:
            url = yahooIntra + ticker + '/chartdata;type=quote;range=' + intraRange + '/csv'
        elif dailyDateRange is not None:
            if urlType == 1:
                dateFrom = dailyDateRange[0]
                dateTo = dailyDateRange[1]
                url = yahooDaily + ticker + '&d=' + str(dateTo.month) + '&e=' + str(dateTo.day) + '&f=' + str(
                    dateTo.year) + '&g=d&a=' + str(dateFrom.month) + '&b=' + str(dateFrom.day) + \
                      '&c=' + str(dateFrom.year) + '&ignore=.csv'
            elif urlType == 2:
                startEpoch = int(datetime.timestamp(dailyDateRange[0]))
                endEpoch = int(datetime.timestamp(dailyDateRange[1]))
                baseUrl = stockConfig.yahooBaseURL
                argsHist = '%s?period1=%s&period2=%s&interval=1d&events=history' % (ticker, startEpoch, endEpoch)
                url = baseUrl + argsHist
                typ = 'post'

        # response = requests.get(url)
        data, track = tryRequest(url=url, typ=typ, track=track, testFunc=testFunc, timeOut=8)
        data = np.array(pd.DataFrame(data).replace({'!"': ''}, regex=True).iloc[:, 0])

        try:
            if data.shape[0] == 1:
                return None, track
        except:
            pass

        try:
            if ('error' in data[2]) | ('404 Not Found' in data[1]):
                return None, track
        except:
            print(data)
            input()

        if symbols is not None:
            startIndex = 0

        elif intraRange is not None:
            values = np.array(re.split(',|:', data[np.array(['values' in x for x in data])][0]))[1:]
            lastValue = data[np.array([values[len(values) - 1] in x for x in data])]
            startIndex = np.where(data == lastValue[len(lastValue) - 1])[0][0] + 1
        else:
            values = np.array(re.split(',|:', data[0]))
            startIndex = 1

        for i in range(startIndex, data.shape[0]):
            if (len(data[i]) > 0):
                splitRow1 = re.split(''',(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', data[i])
                if symbols is not None:
                    # timeStamp = np.datetime64(datetime.now().date())

                    # runs at 4am, so the prices apply to the previous day's timestamp
                    timeStamp = str(datetime.now().date() - timedelta(days=1))
                    splitRow1 = [timeStamp] + splitRow1
                else:
                    try:
                        timeStamp = datetime.fromtimestamp(int(splitRow1[0]))
                    except:
                        try:
                            splitRow = splitRow1.copy()
                            timeStamp = splitRow[0]
                        except Exception as e:
                            kk = e
                            print(kk)
                    splitRow1[0] = str(timeStamp)
                dataOut.append(splitRow1)

        dataOut = pd.DataFrame(dataOut)
        if (dataOut.shape == (0, 0)):
            return None, track

        values[0] = 'timestamp'
        values = np.array([x.lower() for x in values])
        try:
            dataOut.columns = values
        except:
            print("columns didn't work - here")
        dataOut = dataOut.replace('null', np.NaN)

        if symbols is None:
            dataOut = dataOut.replace('N/A', np.NaN)
            dataOut = dataOut.iloc[~np.isnan(np.nansum(dataOut.iloc[:, 1:5].astype(float), axis=1)) & (
                        np.nansum(dataOut.iloc[:, 1:5].astype(float), axis=1) != 0), :]
            cols = ['timestamp', 'open', 'close', 'high', 'low', 'volume']
            try:
                dataOut = dataOut[cols + values[np.array((~pd.Series(values).isin(cols)).tolist())].tolist()]
            except Exception as e:
                print(dataOut)
                print(e)
            dataOut.columns = pd.Series(dataOut.columns).replace({'adj close': 'adj_close'}, regex=True)
            dataOut.iloc[:, 1:len(dataOut.columns)] = dataOut.iloc[:, 1:len(dataOut.columns)].astype(float)
            if (dataOut.shape == (0, 0)):
                return None, track
            dataOut['ticker'] = ticker

            try:
                dataOut = dataOut.drop_duplicates(subset='timestamp', keep='first')
            except Exception as e:
                print(dataOut)
                print(e)

            if adjust & (dataOut.shape[0] > 3):
                startEpoch = int(datetime.timestamp(dailyDateRange[0]))
                endEpoch = int(datetime.timestamp(dailyDateRange[1]))
                baseUrl = 'https://query1.finance.yahoo.com/v7/finance/download/'
                argsDiv = '%s?period1=%s&period2=%s&interval=1d&events=div' % (ticker, startEpoch, endEpoch)
                argsSplit = '%s?period1=%s&period2=%s&interval=1d&events=split' % (ticker, startEpoch, endEpoch)
                typ = 'post'

                testDiv = False
                cnt = 0
                cnt1 = 0
                while (testDiv == False) & (cnt <= 20) & (cnt1 <= 10):
                    dataDiv, _ = tryRequest(url=baseUrl + argsDiv, typ=typ)
                    if pd.Series(dataDiv).str.contains(
                            'No data found, symbol may be delisted|Data doesn\'t exist for startDate').any():
                        cnt += 1
                    elif pd.Series(dataDiv).str.contains('error occurred while retrieving timeseries from redis').any():
                        cnt1 += 1
                    elif 'Error occurred while retrieving timeseries from Redis' in str(dataDiv):
                        cnt1 += 1
                    elif 'duplicate key' in str(dataDiv):
                        cnt1 += 1
                    elif pd.Series(dataDiv).str.contains('error occurred').any():
                        cnt1 += 1
                    elif pd.Series(dataDiv).str.contains('HTTP ERROR: 503').any():
                        cnt1 += 1
                    elif (dataDiv.shape == (2,)):
                        cnt1 += 1
                    else:
                        testDiv = True
                if testDiv == False:
                    dataDiv = np.array(['Date,Dividends', ''])

                testSplit = False
                cnt = 0
                cnt1 = 0
                cnt2 = 0
                while (testSplit == False) & (cnt <= 20) & (cnt1 <= 10) & (cnt2 <= 10):
                    dataSplit, _ = tryRequest(url=baseUrl + argsSplit, typ=typ)
                    if pd.Series(dataSplit).str.contains(
                            'No data found, symbol may be delisted|Data doesn\'t exist for startDate').any():
                        cnt += 1
                    elif pd.Series(dataSplit).str.contains(
                            'error occurred while retrieving timeseries from redis').any():
                        cnt1 += 1
                    elif 'Error occurred while retrieving timeseries from Redis' in str(dataSplit):
                        cnt1 += 1
                    elif 'duplicate key' in str(dataSplit):
                        cnt1 += 1
                    elif pd.Series(dataSplit).str.contains('error occurred').any():
                        cnt1 += 1
                    elif pd.Series(dataSplit).str.contains('HTTP ERROR: 503').any():
                        cnt1 += 1
                    elif dataSplit.shape == (2,):
                        cnt2 += 1
                    else:
                        testSplit = True
                if testSplit == False:
                    dataSplit = np.array(['Date,stock splits', ''])

                ## the adjusted data effectively ignores splits when going back, so we only adjust for dividends;
                ## this is only relevant when transforming open/close etc. of old data, since new data already adjusts open etc. for splits
                dataExtra = {}
                for k in ['dataDiv', 'dataSplit']:
                    cols = pd.Series(eval(k)).str.split(',', expand=True).iloc[0, :].values
                    cols = np.array([x.lower() for x in cols])
                    dataExtra[k] = pd.Series(eval(k)).str.split(',', expand=True).iloc[1:, :]
                    dataExtra[k].columns = cols
                    try:
                        dataExtra[k].sort_values(by='date', inplace=True)  #
                    except:
                        print(dataExtra[k])
                        dataExtra[k].sort_values(by='date', inplace=True)
                    dataExtra[k] = dataExtra[k][dataExtra[k].date != ''].reset_index(drop=True)

                dataOut = dataOut.reset_index(drop=True)
                locDate = np.where(dataOut == np.inf)[0]
                getRidDate = dataOut.timestamp.values.astype('datetime64[D]').min()
                if locDate.shape[0] > 0:
                    getRidDate = dataOut.timestamp[np.where(dataOut == np.inf)[0].max()]
                stupidSplit = dataExtra['dataSplit'].date[dataExtra['dataSplit']['stock splits'].str.contains('/0') | (
                            dataExtra['dataSplit']['stock splits'].str[:2] == '0/')].values
                if len(stupidSplit) > 0:
                    getRidDate = np.max(np.array([stupidSplit.max(), getRidDate]).astype('datetime64[D]'))
                uniqueDates = np.unique(np.concatenate(([dataOut.timestamp.unique(),
                                                         dataExtra['dataDiv']['date'].unique(),
                                                         dataExtra['dataSplit']['date']]))).astype('datetime64[D]')
                uniqueDates.sort()
                uniqueDates = uniqueDates[uniqueDates > np.array([getRidDate]).astype('datetime64[D]')]
                # insert any missing dates that pay splits or dividends
                dataOut = dataOut[
                    dataOut.timestamp.values.astype('datetime64[D]') > np.array([getRidDate]).astype('datetime64[D]')]

                # if there is a split or div but no new data afterward, the getRid date will leave us with no data
                if dataOut.shape[0] == 0:
                    return None, track

                dataOut.index = dataOut.timestamp.astype('datetime64[D]').copy()
                dataOut = dataOut.reindex(uniqueDates)
                ## need to backfill missing dates for dividends and splits, since the backwards adjustment uses the following day's data
                dataOut = dataOut.fillna(method='backfill')
                dataOut.timestamp = dataOut.index.values.astype('datetime64[D]').astype(str)
                dataOut.reset_index(drop=True, inplace=True)

                dataExtra['dataDiv'] = dataExtra['dataDiv'][
                    dataExtra['dataDiv']['date'].astype('datetime64[D]') > getRidDate].reset_index(drop=True)
                dataExtra['dataSplit'] = dataExtra['dataSplit'][
                    dataExtra['dataSplit']['date'].astype('datetime64[D]') > getRidDate].reset_index(drop=True)

                scalingFactor = 1
                scalingFactorCum = 1
                dataAdjust = dataOut.sort_values(by='timestamp', ascending=False)
                div = pd.DataFrame(np.zeros(dataAdjust.shape[0]), index=dataAdjust.timestamp)
                div.loc[dataExtra['dataDiv']['date']] = dataExtra['dataDiv']['dividends'].values
                div.iloc[:, :] = div.iloc[:, :].astype(float)

                splits = pd.DataFrame(np.zeros(dataAdjust.shape[0]), index=dataAdjust.timestamp)
                splits.loc[dataExtra['dataSplit']['date']] = dataExtra['dataSplit']['stock splits'].values

                adj_close = np.zeros(dataAdjust.shape[0])
                try:
                    adj_close[0] = dataAdjust.close.values[0]
                except:
                    broken = 1
                scalar = 1

                for i in range(1, dataAdjust.shape[0]):
                    if div.iloc[i - 1].values != 0:
                        scalingFactorCum = scalingFactorCum * scalingFactor
                        scalingFactor = (adj_close[i - 1] + div.iloc[i - 1].values) / adj_close[i - 1]
                    if (splits.iloc[i - 1].values != 0):
                        if (eval(splits.iloc[i - 1].values[0]) == 0) & (splits.iloc[i - 1].values[0] != 0):
                            return None, track
                        scalar = 1 / eval(splits.iloc[i - 1].values[0]) * scalar
                    closePreAdj = dataAdjust.close.iloc[i] / scalingFactorCum
                    adj_close[i] = closePreAdj / scalingFactor * scalar

                dataOut['adj_close'] = adj_close[::-1]
                dataOut = dataOut.replace(np.NaN, 'N/A')
                dataOut.timestamp = dataOut.timestamp.astype('datetime64[D]')
                dataOut = dataOut.sort_values(by='timestamp')
                dataOut.timestamp = dataOut.timestamp.astype(str)


        else:
            # use dictionary to replace all instances of string in dataFrame
            dataOut = dataOut.replace({'"': ''}, regex=True)
            dataOut[['dividend pay date', 'ex-dividend date', 'last trade date', ]] = \
                dataOut[['dividend pay date', 'ex-dividend date', 'last trade date']].apply(
                    lambda x: x.apply(lambda y: np.datetime64(datetime.strptime(y, '%m/%d/%Y').date()) \
                        if (y != 'N/A') & ('/' in y) & (len(y) <= 10) else 'N/A'))

            dataOut['last trade time'] = dataOut['last trade time'].apply(
                lambda x: str((datetime.strptime(pd.Series(x).replace({'pm': '', 'am': ''}, regex=True)[0], '%H:%M') + \
                               timedelta(hours=(12 if 'pm' in x else 0))).time()) if (x != 'N/A') & (
                            ('am' in x) | ('pm' in x)) else 'N/A')

            dataOut[['revenue', 'market capitalization', 'ebitda']] = \
                dataOut[['revenue', 'market capitalization', 'ebitda']].apply(
                    lambda y: [float(x.replace('M', '')) * 1e6 if 'M' in x else \
                                   float(x.replace('B', '')) * 1e9 if ('B' in x) else float(x) if (x != 'N/A') & (
                                               x is not None) else x for x in y])

            dataOut[['change in percent', 'percent change from 200 day moving average',
                     'percent change from 50 day moving average', 'percent change from 52 week low', \
                     'percent change from 52 week high']] = \
                dataOut[['change in percent', 'percent change from 200 day moving average',
                         'percent change from 50 day moving average', \
                         'percent change from 52 week low', 'percent change from 52 week high']].apply(
                    lambda x: [float(y) / 100 if (y != 'N/A') & ('%' in y) else \
                                   y for y in x.replace({'%': '', r'\+': ''}, regex=True)])

            dataOut['change'] = [float(x) if x != 'N/A' else x for x in dataOut.change]
            dataOut[['last trade date', 'trade date']] = dataOut[['last trade date', 'trade date']].apply(
                lambda x: x.apply(str))
            dataOut.columns = pd.Series(dataOut.columns).replace(
                {' ': '_', '-': '_', '&': 'and', r'\(': '', r'\)': '', '/': '_to_', '\x92': ''}, regex=True)

        if dataOut.shape[0] == 0:
            return None, track

        return dataOut, track
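# A standalone illustration of the quote-aware row split used in downloadYahoo above:
# the lookahead only lets a comma match when an even number of quotes remains between
# it and the end of the string, so commas inside quoted fields are left alone. The
# sample row is illustrative.
import re

pattern = r''',(?=(?:[^'"]|'[^']*'|"[^"]*")*$)'''
print(re.split(pattern, 'AAPL,"1,234.50",2017-01-03'))
# ['AAPL', '"1,234.50"', '2017-01-03']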
Exemple #51
0
def HTML_Execution_Summary_Footer(self):

    try:

        oFileWriter = open(self.strExecSummaryHTMLFilePath, 'a')
        oFileWriter.write("</tbody>\n")
        oFileWriter.write("</table>\n")
        oFileWriter.write("<table>\n")
        oFileWriter.write("<script type='text/javascript'>\n")
        oFileWriter.write("window.onload = function () {\n")
        oFileWriter.write("CanvasJS.addColorSet('chartshades',\n")
        oFileWriter.write("[//colorSet Array\n")
        oFileWriter.write("'lightgreen',\n")
        oFileWriter.write("'red'           \n")
        oFileWriter.write("]);\n")
        oFileWriter.write("var chart = new CanvasJS.Chart('chartContainer',\n")
        oFileWriter.write("{\n")
        oFileWriter.write("colorSet: 'chartshades',\n")
        oFileWriter.write("zoomEnabled: true,\n")
        oFileWriter.write("title:{\n")
        oFileWriter.write("fontColor: '#C6FFEC',\n")
        oFileWriter.write("text: 'Execution Status'\n")
        oFileWriter.write("},\n")
        oFileWriter.write("animationEnabled: true,\n")
        oFileWriter.write("backgroundColor: 'black',\n")
        oFileWriter.write("legend:{\n")
        oFileWriter.write("fontColor: '#C6FFEC',\n")
        oFileWriter.write("verticalAlign: 'bottom',\n")
        oFileWriter.write("horizontalAlign: 'center'\n")
        oFileWriter.write("},data: [{        \n")
        oFileWriter.write("indexLabelFontSize: 20,\n")
        oFileWriter.write("indexLabelFontFamily: 'Monospace',     \n")
        oFileWriter.write("indexLabelFontColor: '#C6FFEC', \n")
        oFileWriter.write("indexLabelLineColor: '#C6FFEC',     \n")
        oFileWriter.write("indexLabelPlacement: 'auto',\n")
        oFileWriter.write("type: 'pie',       \n")
        oFileWriter.write("showInLegend: true,\n")
        oFileWriter.write(
            "toolTipContent: '{y} - <strong>#percent%</strong>',\n")
        oFileWriter.write("dataPoints: [\n")
        if not self.intPassTCCount == 0:
            oFileWriter.write("{  y: " + str(self.intPassTCCount) +
                              ", legendText:'PASS', indexLabel: '{y}' },\n")
        else:
            oFileWriter.write("{  y: " + str(self.intPassTCCount) +
                              ", legendText:'PASS'},\n")
        if not self.intFailTCCount == 0:
            oFileWriter.write("{  y: " + str(self.intFailTCCount) +
                              ", legendText:'FAIL' , indexLabel: '{y}'}\n")
        else:
            oFileWriter.write("{  y: " + str(self.intFailTCCount) +
                              ", legendText:'FAIL'}\n")
        oFileWriter.write("]}]});chart.render();}\n")
        oFileWriter.write("</script>\n")
        oFileWriter.write(
            "<script type='text/javascript' src='./Screenshot/canvasjs.min.js'></script>\n"
        )
        oFileWriter.write(
            "<tr  class='content' ><td><div id='chartContainer' style='height: 300px; width: 100%;'></div></td></tr></table>\n"
        )

        oFileWriter.write("<table id='footer'>\n")
        oFileWriter.write("<colgroup>\n")
        oFileWriter.write("<col style='width: 25%' />\n")
        oFileWriter.write("<col style='width: 25%' />\n")
        oFileWriter.write("<col style='width: 25%' />\n")
        oFileWriter.write("<col style='width: 25%' />\n")
        oFileWriter.write("</colgroup>\n")

        oFileWriter.write("<tfoot>\n")
        oFileWriter.write("<tr class='heading'>\n")

        intExecEndTime = time.monotonic()
        strDuration = str(
            timedelta(seconds=intExecEndTime - self.intExecStartTime))
        strDuration = self.getDuration(strDuration)
        oFileWriter.write("<th colspan='4'>Total Duration: " + strDuration +
                          "</th>\n")
        oFileWriter.write("</tr>\n")
        oFileWriter.write("<tr class='subheading'>\n")
        oFileWriter.write("<td class='pass'>&nbsp;Tests passed</td>\n")
        oFileWriter.write("<td class='pass'>&nbsp;: {}</td>\n".format(
            self.intPassTCCount))
        oFileWriter.write("<td class='fail'>&nbsp;Tests failed</td>\n")
        oFileWriter.write("<td class='fail'>&nbsp;: {}</td>\n".format(
            self.intFailTCCount))
        oFileWriter.write("</tr>\n")
        oFileWriter.write("</tfoot>\n")
        oFileWriter.write("</table>\n")
        oFileWriter.write("</body>\n")
        oFileWriter.write("</html>\n")

        #Always close files.
        oFileWriter.close()

        if os.path.exists(self.strCurrentTXTFolder +
                          'ExecutionInProgress.txt'):
            os.remove(self.strCurrentTXTFolder + 'ExecutionInProgress.txt')
    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
Exemple #52
0
from datetime import datetime
from datetime import timedelta
today = datetime.today()
print(today)
today = today - timedelta(hours=24)
print(datetime.today() > today)
print( (datetime.today() - today).total_seconds())
print(datetime.today() - today)

import datetime
from datetime import timedelta
# in_str = '2016-07-07'
# out_str = '2016-07-08'
# in_array = in_str.split('-')
# out_array = out_str.split('-')
# checkin_time = datetime.datetime(int(in_array[0]),int(in_array[1]),int(in_array[2])).date()
# checkout_time = datetime.datetime(int(out_array[0]),int(out_array[1]),int(out_array[2])).date()
# interval =  (checkout_time-checkin_time).days
# aDay = timedelta(days=1)
#
# for i in range(1,interval+1):
#     checkin_time += aDay
#     print(checkin_time.day)


# str =datetime.datetime.now().strftime('%Y%m%d%H')[2:]
# print(str)
# import django.utils.timezone

# from django.utils.dates import WEEKDAYS
# now = datetime.now()
Exemple #53
0
from datetime import timedelta  # needed for the SIMPLE_JWT lifetimes below

# Root URL conf for the project
ROOT_URLCONF = 'django_rest_role_jwt.urls'

# REST framework settings
REST_FRAMEWORK = {
    # Note the trailing comma: without it this would be a plain string rather than a one-element tuple.
    'DEFAULT_PERMISSION_CLASSES':
    ('rest_framework.permissions.IsAuthenticated',),
    'DEFAULT_AUTHENTICATION_CLASSES':
    ('rest_framework_simplejwt.authentication.JWTAuthentication',
     'rest_framework.authentication.SessionAuthentication',
     'rest_framework.authentication.BasicAuthentication'),
}

# Configure the JWT settings
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=14),
    'ROTATE_REFRESH_TOKENS': True,
    'BLACKLIST_AFTER_ROTATION': False,
    'ALGORITHM': 'HS256',
    'SIGNING_KEY': SECRET_KEY,
    'VERIFYING_KEY': None,
    'AUTH_HEADER_TYPES': ('JWT', ),
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken', ),
    'TOKEN_TYPE_CLAIM': 'token_type',
}

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',