Code example #1
def nextLowTariffZone(time, pricesDF):
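    # NOTE: assumes "import datetime as dt"; readTime() and getData() are
    # project-specific helpers (not shown here) for parsing times and
    # reading values from the prices dataframe.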
    # ***** DEFINE NEXT LOW TARIFF ZONE *****
    # READ IN START AND END TIMES OF LOW TARIFF ZONE
    lowTariffStartHr = getData(pricesDF, 'startGreenZone')
    lowTariffEndHr = getData(pricesDF, 'endGreenZone')

    # IF LOW TARIFF ZONE RUNS OVERNIGHT:
    if (readTime(lowTariffStartHr) > readTime(lowTariffEndHr)):
        # LOW TARIFF START = CURRENT DAY + LOW TARIFF ZONE START TIME
        lowTariffStart = readTime(str(time.date()) + " " + lowTariffStartHr)
        # LOW TARIFF END = NEXT DAY + LOW TARIFF END TIME
        lowTariffEnd = readTime(str(time.date() + dt.timedelta(days=1)) + " " + lowTariffEndHr)

    # IF LOW TARIFF ZONE DOESN'T RUN OVERNIGHT, CONSIDER CASE WHERE TIME IS PAST MIDNIGHT
    else:
        # CALCULATE DIFFERENCE BETWEEN LOW TARIFF ZONE START TIME AND MIDNIGHT
        arbGreenStart = dt.datetime.combine(dt.date.today(), readTime(lowTariffStartHr))
        arbMidnight = dt.datetime.combine(dt.date.today(), readTime("00:00:00"))
        gap = arbGreenStart - arbMidnight

        # LOW TARIFF START = (TIME-GAP) + 1 DAY + LOW TARIFF ZONE START TIME
        lowTariffStart = readTime(str((time-gap).date() + dt.timedelta(days=1)) + " " + lowTariffStartHr)
        # LOW TARIFF END = (TIME-GAP) + 1 DAY + LOW TARIFF ZONE END TIME
        lowTariffEnd = readTime(str((time-gap).date() + dt.timedelta(days=1)) + " " + lowTariffEndHr)

    return lowTariffStart, lowTariffEnd
Code example #2
def construct_snapshots():
    print("Constructing snapshots.")
    jams_df = pd.read_csv("data/jams.csv")

    states = {}

    for i, r in jams_df.iterrows():
        if i % 5000 == 0:
            print("{} of {}".format(i, len(jams_df['street'])))
        time = datetime.fromtimestamp(r['pub_millis'] / 1000)
        hour = time.hour
        day_of_week = time.weekday()
        weather = weather_dict[(time.date(), hour)]
        path = literal_eval(r['line'])
        key = (time.date(), hour)
        if key not in states:
            states[key] = [0] * array_len

        covered_path = cover_path(path)
        for s in covered_path:
            # we are just going to throw out our data that didn't get caught by our grid
            # sorry :(
            # this is a tiny part (~0.5%) of the data anyways
            if s in streets:
                states[key][streets[s]] = 1
    print("Done constructing snapshots.")
    return states
Code example #3
def take_remote_snapshot(host, opera, finput):
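    # date() here is presumably an alias for time.strftime, as in the
    # recorder.py example below ("from time import strftime as date").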
    print 'loading  in = ' + host + ' ; ' + opera
    lines = open(finput, 'r').readlines()
    url1 = lines[0].strip()  # strip the trailing newline left by readlines()
    url2 = lines[1].strip()
    timestamp = date('%Y-%m-%d-%H-%M-%S')
    dir = './screenshots-tmp/' + os.path.basename(finput) + '-' + timestamp
    os.makedirs(dir)
    f = open(dir + '/' + os.path.basename(finput) + '-' + timestamp + '.txt',
             'a')
    timestamp = date('%Y-%m-%d-%H-%M-%S')
    f.write('\n' + timestamp + ' taking snapshots')
    driver1 = webdriver.Remote(host, webdriver.DesiredCapabilities.FIREFOX)
    driver1.get(url1)
    driver2 = webdriver.Remote(opera, webdriver.DesiredCapabilities.OPERA)
    driver2.get(url2)
    format = '%Y-%m-%d-%H-%M-%S-%f'
    today = datetime.datetime.today()
    ts = (today.strftime(format))
    timestamp = date('%Y-%m-%d-%H-%M-%S')
    f.write('\n' + dir + '/ff36-' + ts + '.png' + ' ' + url1)
    f.write('\n' + dir + '/op12-' + ts + '.png' + ' ' + url2)
    driver1.get_screenshot_as_file(dir + '/ff36-' + ts + '.png')
    driver2.get_screenshot_as_file(dir + '/op12-' + ts + '.png')
    driver1.quit()
    driver2.quit()
    f.write('\n' + timestamp + ' snapshots taken')
    f.close()
    print 'snapshots taken'
Code example #4
def stock_crawler(targets):
    
    clear_output(wait=True)
    
    # compose the stock_list query string
    stock_list = '|'.join('tse_{}.tw'.format(target) for target in targets) 
    
    # query data
    query_url = "http://mis.twse.com.tw/stock/api/getStockInfo.jsp?ex_ch="+ stock_list
    data = json.loads(urlopen(query_url).read())

    # keep only the columns we use
    columns = ['c','n','z','tv','v','o','h','l','y']
    df = pd.DataFrame(data['msgArray'], columns=columns)
    df.columns = ['股票代號','公司簡稱','當盤成交價','當盤成交量','累積成交量','開盤價','最高價','最低價','昨收價']
    
    # add an up/down percentage column
    df.iloc[:, [2,3,4,5,6,7,8]] = df.iloc[:, [2,3,4,5,6,7,8]].astype(float)
    df['漲跌百分比'] = (df['當盤成交價'] - df['昨收價'])/df['昨收價'] * 100
    
    # record the update time
    time = datetime.datetime.now()  
    print("更新時間:" + str(time.hour)+":"+str(time.minute))
    
    # show table
    df = df.style.applymap(tableColor, subset=['漲跌百分比'])
    display(df)
    
    start_time = datetime.datetime.strptime(str(time.date())+'9:30', '%Y-%m-%d%H:%M')
    end_time =  datetime.datetime.strptime(str(time.date())+'13:30', '%Y-%m-%d%H:%M')
    
    # decide whether the crawler should keep running
    if time >= start_time and time <= end_time:
        s.enter(1, 0, stock_crawler, argument=(targets,))
Code example #5
File: timely_price.py Project: x24870/stockCrawler
def stock_crawler(targets):
    
    clear_output(wait=True)
    
    # compose query url
    stock_list = '|'.join('tse_{}.tw'.format(target) for target in targets)
    
    # query data
    query_url = "http://mis.twse.com.tw/stock/api/getStockInfo.jsp?ex_ch="+ stock_list
    data = json.loads(urlopen(query_url).read())

    # filter needed columns
    columns = ['c','n','z','tv','v','o','h','l','y']
    df = pd.DataFrame(data['msgArray'], columns=columns)
    df.columns = ['股票代號','公司簡稱','當盤成交價','當盤成交量','累積成交量','開盤價','最高價','最低價','昨收價']
    df.to_csv('timely_price.csv')
    
    # adding up-down percentage
    df.iloc[:, [2,3,4,5,6,7,8]] = df.iloc[:, [2,3,4,5,6,7,8]].astype(float)
    df['漲跌百分比'] = (df['當盤成交價'] - df['昨收價'])/df['昨收價'] * 100
    
    # record updating time
    time = datetime.datetime.now()  
    print("更新時間:" + str(time.hour)+":"+str(time.minute))
    
    # show table
    df = df.style.applymap(tableColor, subset=['漲跌百分比'])
    display(df)
    
    start_time = datetime.datetime.strptime(str(time.date())+'9:30', '%Y-%m-%d%H:%M')
    end_time =  datetime.datetime.strptime(str(time.date())+'13:30', '%Y-%m-%d%H:%M')
    
    # The condition to terminate crawler
    if time >= start_time and time <= end_time:
        s.enter(1, 0, stock_crawler, argument=(targets,))
Code example #7
def update_user_table(*, database_client, time: datetime.datetime,
                      foods_dict: dict, utterance: str, user_id: str):
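    # database_client appears to be a low-level DynamoDB-style client:
    # attribute values carry type descriptors such as {'S': ...} for strings.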
    result = database_client.get_item(TableName='nutrition_users',
                                      Key={
                                          'id': {
                                              'S': user_id
                                          },
                                          'date': {
                                              'S': str(time.date())
                                          }
                                      })
    item_to_save = []
    if 'Item' in result:
        item_to_save = json.loads(result['Item']['value']['S'])
    item_to_save.append({
        'time': time.strftime('%Y-%m-%d %H:%M:%S'),
        'foods': foods_dict,
        'utterance': utterance
    })
    database_client.put_item(TableName='nutrition_users',
                             Item={
                                 'id': {
                                     'S': user_id,
                                 },
                                 'date': {
                                     'S': str(time.date())
                                 },
                                 'value': {
                                     'S': json.dumps(item_to_save),
                                 }
                             })
Code example #9
File: GraciasManelbot.py Project: ns2switch/TelBot
def get_day_shortname(time):
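    # DAY_NAMES is assumed to be a module-level mapping from weekday index
    # (0 = Monday) to a short day name.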
    today = datetime.utcnow().date()
    if time.date() == today:
        return 't'

    yesterday = today - timedelta(days=1)
    if time.date() == yesterday:
        return 'y'

    weekday = DAY_NAMES[time.weekday()]
    return weekday
Code example #10
File: __init__.py Project: tiran/pki
    def __init__(self):

        # PKI Deployment "Mandatory" Command-Line Variables
        self.subsystem_name = None

        # Global dictionary variables
        self.mdict = {}
        self.slots = {}
        self.main_config = None
        self.user_config = None
        self.manifest_db = []

        self.identity = None
        self.namespace = None
        self.configuration_file = None
        self.instance = None
        self.directory = None
        self.file = None
        self.symlink = None
        self.war = None
        self.password = None
        self.fips = None
        self.hsm = None
        self.certutil = None
        self.modutil = None
        self.pk12util = None
        self.kra_connector = None
        self.security_domain = None
        self.systemd = None
        self.tps_connector = None
        self.config_client = None
        self.parser = None
        self.nss_db_type = None

        # Set installation time
        ticks = time.time()
        self.install_time = time.asctime(time.localtime(ticks))

        # Generate a timestamp
        self.log_timestamp = date('%Y%m%d%H%M%S', time.localtime(ticks))
        self.certificate_timestamp = date('%Y-%m-%d %H:%M:%S', time.localtime(ticks))

        # Obtain the architecture bit-size
        self.architecture = struct.calcsize("P") * 8

        # Retrieve hostname
        self.hostname = socket.getfqdn()

        # Retrieve DNS domainname
        self.dns_domainname = subprocess.check_output(["dnsdomainname"])
        self.dns_domainname = self.dns_domainname.decode('ascii').rstrip('\n')

        if not len(self.dns_domainname):
            self.dns_domainname = self.hostname
Code example #11
    def option_vol(self, time, option_type, S, K, C0, r=0.0015, d=0.0424):
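        # Implied volatility via Newton's method: repeatedly update
        # vol -= f(vol) / vega, where f(vol) = model price - observed C0,
        # until |f(vol)| falls below the tolerance.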

        start = time.date()
        end = BMonthEnd().rollforward(start).date()
        t = (end - start).days / 365.0
        #  We need a starting guess for the implied volatility.  We chose 0.5
        #  arbitrarily.
        vol = 0.5

        epsilon = 1.0  #  Define variable to check stopping conditions
        abstol = 1e-4  #  Stop calculation when abs(epsilon) < this number

        i = 0  #  Variable to count number of iterations
        max_iter = 1e3  #  Max number of iterations before aborting

        while epsilon > abstol:
            #  if-statement to avoid getting stuck in an infinite loop.
            if i > max_iter:
                break
            i = i + 1
            orig = vol
            d1, d2 = self.d(vol, S, K, r, t)
            if option_type == 'CALL':
                function_value = self.call_price(vol, S, K, r, t, d, d1,
                                                 d2) - C0
            else:
                function_value = self.put_price(vol, S, K, r, t, d, d1,
                                                d2) - C0
            vega = S * norm.pdf(d1) * sqrt(t)
            vol = -function_value / vega + vol
            epsilon = abs(function_value)
        return vol
Code example #12
def recordCSV(name="hehe"):
    time = datetime.datetime.now()
    row = [time.date(), time.hour, time.minute, time.second, time.microsecond]
    # the with-block closes the file automatically; no explicit close() needed
    with open(name + '.csv', 'a', newline='') as f:
        writer = csv.writer(f, delimiter=',')
        writer.writerow(row)
Code example #13
def runSimulation(startTime, runTime, carData, car_cols, allShiftsDF, pricesDF,
                  chargePtData, cp_cols, chargeCapacity, sim_cols, mph, mpkw,
                  algo):

    depot = []
    carDataDF = pd.DataFrame.from_records(carData, columns=car_cols)
    chargePtDF = pd.DataFrame.from_records(chargePtData, columns=cp_cols)
    simulationDF = pd.DataFrame(columns=sim_cols)
    for car in range(0, len(carDataDF)):
        if carDataDF.loc[car, 'inDepot']: depot.append(car)

    rcCount = 0  # INITIALISE A COUNTER FOR RAPID CHARGES
    time = startTime  # CHOOSE START TIME

    for i in range(0, runTime * chunks):
        if time.time() == startTime.time():
            day = str(time.date())
            shiftsByCar = getShiftsToday(carData, day, allShiftsDF)

        carDataDF, time, depot, simulationDF, chargePtDF = inOutDepot(
            carDataDF, shiftsByCar, time, depot, simulationDF, chargePtDF)
        carDataDF, time, rcCount, simulationDF = decreaseBatt(
            carDataDF, shiftsByCar, time, rcCount, simulationDF, mph[i],
            mpkw[i])
        carDataDF, simulationDF, chargePtDF = simulations[algo](
            carDataDF, depot, shiftsByCar, time, chargeCapacity, simulationDF,
            chargePtDF, pricesDF)
        time = incrementTime(time)
    # print("No. of rapid charges: " + str(rcCount))

    sim = dfFunction(simulationDF)
    # the second return value, simulationDF, is for animation purposes
    return styleDF(sim), simulationDF
Code example #14
File: code.py Project: Gary757483617/bilibili_video
    def get_upload_time(self):
        upload_date = self.soup.find(name="meta",
                                     attrs={"itemprop":
                                            "uploadDate"})['content'][:10]
        time = datetime.strptime(upload_date, '%Y-%m-%d')

        return time.date()
Code example #15
File: sghaze.py Project: situx/tuxbot
def findhazecity(bot,trigger):
	city=trigger.group(2)
	queryurl="http://sg1.aqicn.org/services/search/?lang=en&key=_1ca%2BQ%1Ff%0D%5D%1F%0D%2B%40H%1D%1C%1B&jsoncallback=waqiloader.jsonp.LJdMztTHefKXTry&s="+city+"&xtra&qid=2"
	queryurl2="http://aqicn.org/services/forecast/?city="+city+"&lang=en"
	print(queryurl2)
	result=urllib.request.urlopen(queryurl).read().decode()
	testdata=json.loads(result[result.index('(')+1:-2])['data'][1]
	testurl=json.loads(result[result.index('(')+1:-2])['data'][1]['url']
	if "faq" in testurl:
		testurl=json.loads(result[result.index('(')+1:-2])['data'][2]['url']
	result2=json.loads(urllib.request.urlopen(queryurl2).read().decode())
	if not "forecast"in result2:
		bot.say("Could not find "+city+"!  Syntax: .hazefc Country/City")
		return
	aqiforecast=result2['forecast']['aqi']
	displayres={}
	currentday=None
	curdaystr=""
	for elem in aqiforecast:
		time=iso8601.parse_date(elem['t'])
		if time.date()<=datetime.date.today():
			continue
		if currentday!=time.day:
			currentday=time.day
			curdaystr=str(time.day)+"."+str(time.month)+"."+str(time.year)
			if curdaystr not in displayres:
				displayres[curdaystr]=[]
		displayres[curdaystr].append(elem['v'][0])
		displayres[curdaystr].append(elem['v'][1])
		print(displayres)
	bot.say("Air Quality Forecast for "+city+":")
	for elem in sorted(displayres):
		bot.say(str(elem)+": "+rate_result(min(displayres[elem]))+"/"+rate_result(max(displayres[elem])))
Code example #16
File: bot.py Project: Abdeet/schedulebot
 def get_week(self, custom = False):
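     # Returns the index of the current week within a rotating schedule of
     # self.schedule["weeks"] weeks, counted from self.schedule["start_week"].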
     if not custom:
         time = datetime.datetime.now()
         week_diff = math.floor((time.date() - self.schedule["start_week"]).days / 7) % self.schedule["weeks"]
     else:
         week_diff = math.floor((custom.date() - self.schedule["start_week"]).days / 7) % self.schedule["weeks"]
     return week_diff
Code example #17
def superSmartCharge(carDataDF, chargeCen, shiftsByCar, time, chargeCapacity,
                     simulationDF, chargePtDF, pricesDF):
    # IF THERE ARE CARS IN THE CHARGE CENTRE
    if len(chargeCen) >= 1:
        listRows = []
        # FIND THE TIMES WHEN CARS LEAVE THE CHARGE CENTRE
        for cars in range(0, len(chargeCen)):
            f = chargeCen[cars]
            leaveTime = readTime(str(time.date()) + " 23:59:59")
            for g in range(0, len(shiftsByCar[str(f)])):
                startTime = readTime(shiftsByCar[str(f)].loc[g, 'startShift'])
                if startTime > time and startTime < leaveTime:
                    leaveTime = startTime

            hrsLeft = abs(rereadTime(leaveTime) - rereadTime(time))
            battLeft = abs(carDataDF.loc[f, 'battSize'] -
                           carDataDF.loc[f, 'battPerc'])
            listRows.append([f, battLeft / hrsLeft.total_seconds(), battLeft])

        leaveTimes = pd.DataFrame.from_records(
            listRows, columns=['car', 'priority', 'battLeft'])
        leaveTimes = leaveTimes.sort_values(by=['priority'], ascending=False)
        prioritySum = sum(leaveTimes.priority)

        # CHARGE CARS
        for h in range(0, len(leaveTimes)):
            car = leaveTimes.loc[h, 'car']
            batt = carDataDF.loc[car, 'battPerc']
            batt_size = carDataDF.loc[car, 'battSize']
            batt_left = leaveTimes.loc[h, 'battLeft']
            priority = leaveTimes.loc[h, 'priority']

            # IF CAR BATT IS NOT 100%, CHARGE CAR
            if batt < batt_size:
                # ALLOCATE CHARGE PT IF CAR DOESN'T HAVE ONE
                pt, carDataDF, chargePtDF = findChargePt(
                    carDataDF, car, chargePtDF)
                chargeRate = 0

                # IF CAR HAS A VALID CHARGE PT
                if not np.isnan(pt):
                    # READ MAX RATE
                    maxRate = chargePtDF.loc[pt, 'maxRate']

                    # CALCULATE CHARGE RATE
                    chargeRate = (priority / prioritySum) * chargeCapacity

                    # IF CHARGE RATE EXCEEDS MAX RATE
                    if chargeRate > maxRate: chargeRate = maxRate
                    # IF CHARGE RATE EXCEEDS CHARGE NEEDED
                    if chargeRate > batt_left: chargeRate = batt_left

                chargeCapacity -= chargeRate
                prioritySum -= priority
                carDataDF, simulationDF, chargePtDF = charge(
                    carDataDF, car, chargeRate, simulationDF, time, chargePtDF,
                    pricesDF)

    return carDataDF, simulationDF, chargePtDF
Code example #18
def imap_date(time: datetime) -> str:
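    # Formats the date portion as an IMAP-style date string, e.g. "17-Apr-2015".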
    d = time.date()
    mm = [
        "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct",
        "Nov", "Dec"
    ]
    m = mm[d.month - 1]
    return "%0d-%s-%d" % (d.day, m, d.year)
Code example #19
File: Auto_L.py Project: WyattMeng/L_flow
def convtPdTimeToDate(time):
    if isinstance(time, pd.Timestamp):
        dateStr = time.date()
    elif isinstance(time, (unicode, str)):
        dateStr = stringToDate(time)
    else:
        dateStr = 'date type is %s' % type(time)
    return dateStr 
Code example #20
File: Auto_L.py Project: WyattMeng/L_flow
def convtPdTimeToStr(time): # datetime.date(2012, 9, 18)
    if isinstance(time, pd.Timestamp):
        dateStr = time.date().strftime('%Y-%m-%d')
    elif isinstance(time, (unicode, str)):
        dateStr = time
    else:
        dateStr = 'date type is %s' % type(time)
    return dateStr  
Code example #21
File: import_data.py Project: liushu2000/finance
def date_to_date(input_date):
    if type(input_date) == float:
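        # Excel stores dates as serial day counts; 25569 is the serial for
        # 1970-01-01, so subtracting it converts to a Unix epoch offset.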
        seconds = (input_date - 25569) * 86400.0
        time = datetime.utcfromtimestamp(seconds)
        date = time.date()
    else:
        date = datetime.strptime(input_date, '%d/%m/%Y').date()

    return date
Code example #22
File: tests.py Project: sky-junjun/spark
 def test_filter_with_datetime(self):
     time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
     date = time.date()
     row = Row(date=date, time=time)
     df = self.sqlCtx.createDataFrame([row])
     self.assertEqual(1, df.filter(df.date == date).count())
     self.assertEqual(1, df.filter(df.time == time).count())
     self.assertEqual(0, df.filter(df.date > date).count())
     self.assertEqual(0, df.filter(df.time > time).count())
Code example #23
File: test_serde.py Project: apache/spark
 def test_filter_with_datetime(self):
     time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
     date = time.date()
     row = Row(date=date, time=time)
     df = self.spark.createDataFrame([row])
     self.assertEqual(1, df.filter(df.date == date).count())
     self.assertEqual(1, df.filter(df.time == time).count())
     self.assertEqual(0, df.filter(df.date > date).count())
     self.assertEqual(0, df.filter(df.time > time).count())
Code example #24
 def getDataDate(self):
     # the website is updated at 12:00 noon, so the date is adjusted:
     # e.g. if run after midnight but before noon, return yesterday's date
     time = datetime.now()
     if time.hour < 12:
         d = timedelta(days = 1)
         time = time - d
     return str(time.date())
Code example #26
def expand_years(title):
    years = findall(r"\'[0-9]{2}", title)
    if len(years) == 0:
        return title
    while len(years) > 0:
        y = years[0]
        if int(y[1:3]) > int(date('%y')):
            title = sub(y, sub(r"'", '19', y), title)
        else:
            replacement = input(f'ERROR: Fix year {y}: ')
            title = sub(y, replacement, title)
        years.pop(0)
    return title
Code example #27
def clean_it():
    to_be_removed = [
        './marcalizer/out/bow/top/sift/100/1/',
        './marcalizer/out/bow/top/sift/200/1/', './marcalizer/out/sift/top/1/',
        './marcalizer/out/images/1/', './marcalizer/in/images/1/'
    ]
    for r in to_be_removed:
        #print 'cleaning ' + r
        for f in os.listdir(r):
            os.remove(r + f)
            #print f + ' removed'
    print 'all directories are cleaned'
    ts = date('%Y-%m-%d-%H:%M:%S')
    return ts
Code example #28
File: ga_optimizer.py Project: krmadala/example_nbs
    def get_capital_by_day_as_df(backtest_output):

        risk_dict = {}
        last_day = None
        for time, capital_obj in backtest_output['risk_provider'][backtest_output['risk_provider'].keys()[0]].\
                iteritems():
            current_day = time.date()
            if current_day != last_day:
                capital = capital_obj.capital
                risk_dict[current_day] = capital
                last_day = current_day
        risk_provider_pd = pd.DataFrame(risk_dict.items(),
                                        columns=['date', 'capital'])
        return risk_provider_pd
Code example #29
File: RiverImage.py Project: efekarakus/USGS
def output_image(colors_list):
  """
  Creates an image in the current directory of the result of the modeling
  @param colors_list: array of colors representing the Mississippi river
  """
  # find the image size
  size = len(colors_list) - 1
  width = colors_list[0]
  height = size/width
  size = (width, height)

  
  image = Image.new("RGBA", size)
  hue_value = colors_list[1]
  hue = []
  saturation = []
  value = []
  #Get Hue and Value
  for y in range(height):
    hue.append([])
    saturation.append([])
    value.append([])
    for x in range(width):
      hue[y].append(hue_value)
      value[y].append(255)
  
  for index in range(2, len(colors_list)):
    x = (index - 2)%width
    y = (index - 2)/width
    # if the patch is ground
    if(colors_list[index] == -1):
      hue[y][x] = 11.31 / 360.0
      value[y][x] = 0.59*255
      saturation[y].append(0.67)
    else:
      saturation[y].append(colors_list[index])

  r, g, b = hsv_to_rgb(hue, saturation, value)
  arr = np.dstack((r, g, b))
  new_img = Image.fromarray(arr.astype('uint8'), "RGB")
  for index in range(2, len(colors_list)):
    x = (index - 2)%width
    y = (index - 2)/width
    pixel = new_img.getpixel( (x,y))
    # the order is inverted in the y-axis
    image.putpixel( (x, height-y-1), pixel)
  #endfor
  timestamp = date('%d-%m-%Y %H_%M_%S')
  image.save(image_title + timestamp + extension, image_type)
  return
Code example #30
def calculate_boro_grade(Borough):
    rawdata = pd.read_csv(
        "DOHMH_New_York_City_Restaurant_Inspection_Results_raw.csv",
        low_memory=False)
    cleaningdata1 = rawdata.dropna(subset=['GRADE'])  # drop rows with NaN grades
    cleaningdata2 = cleaningdata1.query(
        'GRADE != "Not Yet Graded"')  # drop "Not Yet Graded" rows
    # convert 'GRADE DATE' to datetime (dropping NaN dates first)
    cleaningdata2 = cleaningdata2.dropna(subset=['GRADE DATE'])
    cleaningdata2['GRADE DATE'] = pd.to_datetime(cleaningdata2['GRADE DATE'],
                                                 format='%m/%d/%Y')
    # convert each datetime to a date via the date() method
    cleaningdata2['GRADE DATE'] = [
        time.date() for time in cleaningdata2['GRADE DATE']
    ]
    # sort the dataframe by date (.sort() was removed in later pandas)
    cleandata = cleaningdata2.sort_values(['GRADE DATE'])
    """this is the method to show one borough with different grades"""

    gradearray = (cleandata['GRADE']).unique()

    datearray = (cleandata['GRADE DATE']).unique()
    data = pd.DataFrame()
    for grade in gradearray:
        boro_date_grade = {}
        grademask = cleandata['GRADE'] == grade
        cleangradedata = cleandata[grademask]
        for date in datearray:

            datemask = cleangradedata[
                'GRADE DATE'] == date  # mask selecting rows for this date
            dateDF1 = cleangradedata[datemask]
            boromask = dateDF1['BORO'] == Borough  # mask selecting the borough
            boroDF = dateDF1[boromask]
            boro_date_grade[date] = len(boroDF)
    # print boro_date_grade
        data['date'] = boro_date_grade.keys()
        data = data.set_index(pd.DatetimeIndex(data['date']))
        del data['date']
        data[grade] = boro_date_grade.values()
        del boro_date_grade, boroDF, dateDF1, boromask, datemask, grademask, cleangradedata
    data = data.groupby(pd.Grouper(freq='M')).sum()  # TimeGrouper was removed in later pandas
    # print data
    data.plot()
    printtitle = 'grade_improvement_' + Borough.lower()
    plt.title(printtitle)
    plt.savefig(printtitle + '.pdf')
    plt.show(block=False)
    return None
Code example #31
def time_series(tweet_iterator):
    time_data = pd.DataFrame([])
    for content in tweet_iterator:
        _id = content['_id']
        time = content['created_at']
        time_data = time_data.append(pd.DataFrame(
            {
                '_id': 1,
                'created_at': time.date()
            }, index=[0]),
                                     ignore_index=True)
    time_d = time_data[['created_at', '_id']].groupby(['created_at'],
                                                      as_index=False).sum()
    return time_d
Code example #32
File: views.py Project: ymcagodme/Norwalk-Judo
def member_check_in(request, pk):
    if request.method == 'POST':
        m = Member.objects.filter(pk=pk)[0]
        time = datetime.now() + timedelta(hours=-8)
        today = time.date()
        attendance_list = m.attendance_set.all().order_by('date').reverse()
        try:
            if attendance_list[0].date == today:
                return HttpResponse(0)
        except IndexError:
            pass
        m.attendance_set.create(date=today)
        m.save()
        return HttpResponse(today.strftime('%b %d, %Y'))
Code example #33
File: import_data.py Project: liushu2000/finance
def date_to_month(input_date):
    # print str(type(input_date))
    # print input_date
    if type(input_date) == float:
        seconds = (input_date - 25569) * 86400.0
        time = datetime.utcfromtimestamp(seconds)
        date = time.date()
    elif type(input_date) == str:
        date = datetime.strptime(input_date, '%d/%m/%Y').date()
    else:
        date = input_date

    month = str(date.year) + '-'+ str(date.month)
    this_month = datetime.strptime(month, '%Y-%m')
    return this_month
Code example #34
def test_choice():
    n = 1 << 5
    size = 1 << 3
    a = [i for i in range(n)]
    n_test = 100000
    count = [0 for _ in range(n)]

    priority = [np.random.randint(0, 100) for _ in range(n)]

    # print(a)
    # print(choice(a, size, priority))
    # return

    for i in range(n_test):
        for v in choice(a, size, priority=priority):
            count[v] += 1

    total_count = sum(count)
    observed_probs = [count[i] / total_count for i in range(n)]
    total_priority = sum(priority)
    given_probs = [priority[i] / total_priority for i in range(n)]
    print('observed'.ljust(10), pretty(observed_probs))
    print('given'.ljust(10), pretty(given_probs))
Code example #35
File: crawler.py Project: gem763/crawly
    def _datize(time):
        if time is None:
            return ''

        try:
            time = pd.Timestamp(time)

            if time.tz is None:
                return str(time.date())

            else:
                return str(time.tz_convert('utc').date())

        except:
            return ''
Code example #36
    def request_nws(self):
        # request the hourly temperature forecast from the National Weather Service
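        # the points endpoint resolves lat/lon to a forecast grid, whose
        # metadata links to the hourly forecast used below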
        url = 'https://api.weather.gov/points/' + self.lat + ',' + self.lon
        r = requests.get(url)

        city = r.json()['properties']['relativeLocation']['properties']['city']
        state = r.json()['properties']['relativeLocation']['properties']['state']
        self.location = city + ", " + state

        r2 = requests.get(r.json()['properties']['forecastHourly'])

        out = pd.DataFrame(columns=[
            'lat', 'lon', 'year', 'month', 'day', 'hour', 'date', 'doy', 'temp'
        ])
        for i in range(len(r2.json()['properties']['periods'])):
            lat = self.lat
            lon = self.lon
            time = datetime.datetime.strptime(
                r2.json()['properties']['periods'][i]['startTime'][:-6],
                '%Y-%m-%dT%H:%M:%S')
            year = time.year
            month = time.month
            day = time.day
            hour = time.hour
            date = time.date()
            doy = time.timetuple().tm_yday
            temp = r2.json()['properties']['periods'][i]['temperature']
            temp = (temp - 32) * 5 / 9
            out.loc[i] = [lat, lon, year, month, day, hour, date, doy, temp]

        dates = out['date'].unique()
        self.dates = dates

        out = out.groupby(['doy'], as_index=False, group_keys=False).agg(
            {'temp': ['max', 'min', 'mean']})
        out.columns = ['doy', 'T_max', 'T_min', 'T_avg']
        out['T_rng'] = out['T_max'] - out['T_min']
        out.insert(1, "lat", self.lat)
        out.insert(2, 'lon', self.lon)
        out[['lat', 'lon']] = out[['lat', 'lon']].astype(float)

        # add the elevation and theoretical radiation
        ERA5_process.process.elevation_function(out, 'lat', 'lon')
        out_elev = ERA5_process.process(out)
        out_rad = out_elev.add_radiation()

        self.out_rad = out_rad
Code example #37
File: views.py Project: ymcagodme/Norwalk-Judo
def member_query(request, pk):
    try:
        m = Member.objects.filter(pk=pk)[0]
    except IndexError:
        return HttpResponseNotFound('Not Found')
    name = '%s %s' % (m.first_name, m.last_name)
    time = datetime.now() + timedelta(hours=-8)
    today = time.date()
    attendance_record = [ str(d) for d in m.attendance_set.all() ]
    attendance_record.reverse()
    is_checked_today = False
    try:
        if attendance_record[0] == str(today):
            is_checked_today = True
    except IndexError:
        pass
    return render_to_response('app.html', {'name':name, 'isCheckedToday': is_checked_today, 'record': attendance_record}, context_instance=RequestContext(request))
Code example #38
def expand_years(string):
    """Expand years abbreviated with an apostrophie."""
    years = findall(r"\'[0-9]{2}", string)
    if not years:
        return string
    for year in years:
        if int(year[1:3]) > int(date('%y')):
            string = sub(year, sub(r"'", '19', year), string)
        elif ARGS.confirm:
            response = input(f'Fix year "{year}": ')
            if response == '':
                string = sub(year, sub(r"'", '20', year), string)
            else:
                string = sub(year, response, string)
        else:
            string = sub(year, sub(r"'", '20', year), string)
    return string
Code example #39
def urls(page):
    """Generate sitemap.xml. Makes a list of urls and date modified."""
    pages = []
    # Article model pages
    articles = Article.query.order_by(Article.update_time).paginate(
        page, per_page=2000, error_out=False).items
    for article in articles:
        url = url_for('mip.miparticleDetails', id=article.id, _external=True)
        time = article.update_time or datetime(2017, 1, 1)
        modified_time = time.date().isoformat()
        pages.append([url, modified_time])

    sitemap_txt = render_template('urls.txt', pages=pages)
    response = make_response(sitemap_txt)
    response.headers["Content-Type"] = "text/plain"

    return response
Code example #40
def contactinfo():
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    creds = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)
    client = gspread.authorize(creds)

    contacts = []
    name = []
    contacts_return = []
    first_contact = []
    second_contact = []

    # current time
    time = datetime.datetime.now()
    secondtime = time.date()
    third = secondtime.strftime('%Y-%m-%d')

    today = datetime.datetime.today()
    month = today.strftime('%Y-%m')

    # duty roster
    sheet1 = client.open('당직명단').worksheet(month)
    firstpage = sheet1.get_all_values()

    # contact list
    sheet2 = client.open('당직연락처').worksheet("Contact").get_all_records()

    # match today's date in the roster and pair each name with its contact number

    for firstpage_row in firstpage:
        if firstpage_row[0] == third:
            for sheet2_row in sheet2:
                if sheet2_row['이름'] == firstpage_row[1]:
                    first_contact.append(sheet2_row['이름'])
                    temp = sheet2_row['연락처']
                    a = temp.replace("-", "")
                    first_contact.append(a)
                if sheet2_row['이름'] == firstpage_row[4]:
                    second_contact.append(sheet2_row['이름'])
                    temp = sheet2_row['연락처']
                    a = temp.replace("-", "")
                    second_contact.append(a)
    return first_contact, second_contact
Code example #41
File: getp.py Project: the-joking/getp
def install(apk,repo):
	print '[*]initializing download to repository:%s and package: %s' % (repo,apk)
	print '[*]initializing at %s' % time.ctime()  # the time module has no date(); ctime() gives a readable timestamp
	try:
		src = urllib2.urlopen(repo)
		source_code = src.read()
		# derive the local file name from the last path segment of the URL;
		# the original loop had no effect (str.rstrip returns a new string)
		filename = repo.rstrip('/').split('/')[-1]
		filesrc = open(filename,'wb')
		filesrc.write(source_code)
		filesrc.close()
		print '[$]download concluded'
	except:
		print '[!]failed to save the file on your computer'
		sys.exit(1)
Code example #42
def clean_it():
 candidate_dirs = ['./marcalizer/out/bow/top/sift/100/1/',
                   './marcalizer/out/bow/top/sift/200/1/',
                   './marcalizer/out/sift/top/1/',
                   './marcalizer/out/images/1/',
                   './marcalizer/in/images/1/']
 to_be_removed = [d for d in candidate_dirs if os.path.isdir(d)]
 
 for r in to_be_removed:
  for f in os.listdir(r):
   os.remove(r + f)
 print 'all directories are cleaned'
 ts = date('%Y-%m-%d-%H:%M:%S')
 return ts
Code example #43
File: filemgt.py Project: mpk65/Aid-BI
    def __init__(self):
        self.main_dir       = "/Your/main/dir/"
        self.docx_dir       = self.main_dir + "d1_docx/"
        self.pdf_dir        = self.main_dir + "d1_pdf/"
        self.txt_dir        = self.main_dir + "d2_txt/"
        self.id_dir         = self.main_dir + "d3_id_txt/"
        self.json_dir       = self.main_dir + "d4_id_json/"
        self.couch_dir      = self.main_dir + "d5_couchdb_uploaded/"
        self.xmls_dir       = self.main_dir + "x1_sets_activities/"
        self.xml_dir        = self.main_dir + "x2_split_activities/"
        self.xml_couch_dir  = self.main_dir + "x3_couchdb_uploaded/"
        self.processed_dir  = self.main_dir + "processed_" + date("%Y%m%d%H%M%S") + "/"

        for attr, value in self.__dict__.iteritems():
            #print attr, value
            if not os.path.exists(value):
                os.makedirs(value)
        
        self.docx_ext       = ".docx"
        self.pdf_ext        = ".pdf"
        self.txt_ext        = ".txt"
        self.id_ext         = ".txt"
        self.json_ext       = ".json"
        self.xml_ext        = ".xml"
        self.couch          = "couch"
        self.couch_ext      = self.couch
        
#        change when publishing to production
#        self.couch_host = 'http://iati.iriscouch.com:5984/'
#        self.documents_couch = 'calais'
#        self.activities_couch = 'buza_iati'
#        self.xml_design_doc = 'buza_iati'
        self.couch_host = 'http://127.0.0.1:5984/'
        self.couch_user = ''
        self.couch_pwd  = ''
        self.documents_couch = 'aid'
        self.activities_couch = 'aid'
        self.xml_design_doc = 'bi'
Code example #44
File: weather.py Project: AoJ/alex
 def __init__(self, input_json, time=None, daily=False):
     # get current weather
     if time is None:
         self.temp = self._round_temp(input_json['main']['temp'])
         self.condition = self.CONDITION_TRANSL[input_json['weather'][0]['id']]
         return
     # get prediction
     if daily:  # set time to 13:00 for daily
         time = datetime.combine(time.date(), dttime(13, 00))
     ts = int(time.strftime("%s"))  # convert time to Unix timestamp
     for fc1, fc2 in zip(input_json['list'][:-1], input_json['list'][1:]):
         # find the appropriate time frame
         if ts >= fc1['dt'] and ts <= fc2['dt']:
             self.condition = self.CONDITION_TRANSL[fc1['weather'][0]['id']]
             # hourly forecast -- interpolate temperature
             if not daily:
                 slope = (fc2['main']['temp'] - fc1['main']['temp']) / (fc2['dt'] - fc1['dt'])
                 self.temp = self._round_temp(fc1['main']['temp'] + slope * (ts - fc1['dt']))
             # daily forecast: use daily high & low
             else:
                 self.temp = self._round_temp(fc1['temp']['day'])
                 self.min_temp = self._round_temp(fc1['temp']['min'])
                 self.max_temp = self._round_temp(fc1['temp']['max'])
Code example #45
 def segment_original_report(self, tagged_clusters, 
                                    uni_encoded_cluster_tokenized_list, 
                                    original_report_path, percent_cat, 
                                    converted_fileid):
     re = ReportEnviroments()  # note: shadows the re module inside this method
     sent_by_topic = []
     for i in range(len(tagged_clusters)):
         sent_by_topic.append([sent for sent in tagged_clusters[i][0]])
     topic_titles = []
     for i in range(len(uni_encoded_cluster_tokenized_list)):
         topic_titles.append(uni_encoded_cluster_tokenized_list[i][0])
     topic_titles.append('uncategorized')
     with open(re.segmented_reports_corpus_path+converted_fileid, 'w') as f:
         timestamp = 'relatorio segmentado de ' + original_report_path \
                                                + '\nconvertido em ' \
                                                + date("%H:%M:%S %d/%m/%Y") \
                                                + '\n%cat=' + percent_cat 
         f.write(timestamp)
         for i in range(len(sent_by_topic)):
             topic_title = '\n\n' + topic_titles[i].upper() + '\n\n'
             f.write(topic_title)
             for j in range(len(sent_by_topic[i])):
                 f.write(sent_by_topic[i][j] + '\n')
Code example #46
def marcalize_it():
 clean_it()
 log = open('./log/log.txt', 'a')
 rootdir = './screenshots-tmp/'
# select folders of all pairs in rootdir
 l_folder = []
 for root, subFolders, files in os.walk(rootdir):
  for folder in subFolders:
   l_folder.append(rootdir + str(folder))
 for f in l_folder:
  pictures = os.listdir(f)
  if len(pictures) > 2:
   for pic in pictures:
    print f +'/'+pic
     if pic.endswith(".png"):
     shutil.copy(f +'/'+pic, './marcalizer/in/images/1/')
    else:
     os.system('cat ' + f + '/' + pic + ' >> ./log/marcalizer-log.txt ')
      print 'text'
   shutil.move(f, './log/compared-pictures/')
   # keep only .png files; filtering a copy avoids mutating the list while iterating
   pics = [pi for pi in os.listdir('./marcalizer/in/images/1/') if pi.endswith('.png')]
   p1 = './marcalizer/in/images/1/' + str(pics[0])
   p2 = './marcalizer/in/images/1/' + str(pics[1])
   print str(date('%Y-%m-%d-%H-%M-%S')) + ' starts with : [' + str(p1) + '] , ' + '[' + str(p2) + ']'
   os.system('echo "\n" >> ./log/marcalizer-log.txt')
   os.system('date  >> ./log/marcalizer-log.txt')
   os.system('java -jar ./marcalizer/marcalizer.jar -snapshot1 ' + p1 + ' -snapshot2 ' + p2 + ' >> ./log/marcalizer-log.txt')
   os.system('date  >> ./log/marcalizer-log.txt')
   os.system('echo "-------------------------------------------\n" >> ./log/marcalizer-log.txt')
   clean_it()
  else:
   print 'no pictures in the folder'
   shutil.move(f, './log/compared-pictures/')
Code example #47
def print_error(message):
    sys.stderr.write("%s: ERROR: %s\n" % (date('%Y-%m-%d %H:%M:%S'), message))
Code example #48
def track_progress_row(instance_name, shapefile, time, min_datetime, max_datetime):
    sql = "SELECT id FROM %s WHERE instance_name = '%s' AND input_shapefile = '%s' AND date(time) = '%s' AND min_time = '%s' AND max_time = '%s'" % (MONITOR_TABLE, instance_name, shapefile, time.date().isoformat(), min_datetime.isoformat(), max_datetime.isoformat())
    cursor.execute(sql)
    return cursor.fetchone()
Code example #49
File: recorder.py Project: groeg/scripts
#!/usr/bin/python
import os, sys
from time import strftime as date
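# strftime is aliased to date() so timestamps can be built as date('<format>')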

#stream = "http://mp3stream1.apasf.apa.at:8000/listen.pls"
#length = 600

urls = { "fm4": "http://mp3stream1.apasf.apa.at:8000/listen.pls",
						"oe3": "www.google.com" }

if len(sys.argv) != 3:
	raise SystemExit, "usage: %s station-name length" % sys.argv[0]

stream_name = sys.argv[1]
length = sys.argv[2]

if stream_name not in urls:
	raise SystemExit, "%s not a valid radio station. Stations are: %s" % (stream_name, str(urls.keys()))

file_name = stream_name + "_" + date('%Y-%m-%d_%H:%M')

os.system("streamripper " + urls[stream_name] + " -D ~/Music/streams -a -l " + length)
os.system("mv ~/Music/streams/incomplete/*mp3 ~/Music/streams/" + file_name +
          ".mp3")
Code example #50
def main():
 node0 = "http://im1a10.internetmemory.org:5555/wd/hub"
 node1 = "http://im1a11.internetmemory.org:5555/wd/hub"
 node2 = "http://im1a12.internetmemory.org:5555/wd/hub"
 opera0 = "http://im1a10.internetmemory.org:5556/wd/hub"
 opera1 = "http://im1a11.internetmemory.org:5556/wd/hub"
 opera2 = "http://im1a12.internetmemory.org:5556/wd/hub"

 try:
  opts, args = getopt.getopt(sys.argv[1:], 'hf:d:', ['help', 'file=', 'directory='])
 except getopt.GetoptError as err:
  print('\nERROR:')
  print (err)
  usage()
  sys.exit()
#create all necessary folders
 if not os.path.exists('log'):
  os.makedirs('log')
 if not os.path.exists('log/compared-pictures'): 
  os.makedirs('log/compared-pictures')
 if not os.path.exists('./screenshots-tmp/'):
  os.makedirs('./screenshots-tmp/')
 if not os.path.exists('./pairs/'):
  os.makedirs('./pairs/')
#check options
 for o,a in opts:
  if o in ('-h', '--help'):
   usage()
  elif o in ('-f', '--file'):
   if os.path.isfile(a):
    f = open(a,'r')
    l = [l for l in f.readlines() if l.strip()]
    split_it(l, 1)
    f.close()
    print '\n the file is now divided to pairs in the directory "pairs" '
    print 'you can use the option "-d" of the script or type "-h" for help '
   else:
    print ('is not a file')
  elif o in ('-d', '--directory'):
   ts = date('%Y-%m-%d-%H:%M:%S')
   dir = a 
   if dir[-1] != '/':
    dir = dir + '/'
   if os.path.isdir(dir):
    print 'is a dir'
    listing = os.listdir(dir)
    n = len(listing)
    print '~~~ ' + str(n) + ' ~~~'
    if (n <= 0):
     print 'capture in localhost'
     # pass target and args separately; calling the function inline would run it
     # in the parent process (opera0 assumed as the matching Opera node)
     p = Process(target=take_5_snapshots, args=(node0, opera0, listing, dir,))
     p.start()
    if (n > 0):
     print 'capture in other nodes' 
     lol = [listing[i:i+(n/3+1)] for i in range(0, n, n/3+1)]
     iteration = 0
     for l in lol:
      print '\n' 
      if iteration == 0 :
       print 'node0 = ' + str(l)
       p0 = Process(target=take_5_snapshots, args=(node0, opera0, l, dir,))
       p0.start()
      if iteration == 1 :
       print 'node1 = ' + str(l)
       p1 = Process(target=take_5_snapshots, args=(node1, opera1, l, dir,))
       p1.start()
      if iteration == 2 :
       print 'node2 = ' + str(l)
       p2 = Process(target=take_5_snapshots, args=(node2, opera2, l, dir,))
       p2.start()
      iteration += 1
   else:
    print 'is not a directory'
  else:
   print('ERROR')
   sys.exit(2)

 time.sleep(50)
 marcalize_it() 
Code example #51
File: main.py Project: aviatorblue62/sFPI
 def PauseImage(self):
     self.pause_image = './images/pause/' + date('%Y-%m-%d_%H%M%S') + '.gif'
     call(['cp','Graph_Update.gif',self.pause_image])
     self.image = self.pause_image
Code example #52
File: main.py Project: jacobcvt12/panda
				
				sql = "INSERT INTO Visits_to_all_Personalized_Pages" + \
					  "(Date, MRLP_VISIT, MYA_Overview_Visit, " + \
					  "MRLP_and_MYA_Overview, Earn_No_MRLP_MYA, " + \
					  "Use_No_MRLP_MYA, Trip_Planner_No_MRLP_MYA, " + \
					  "Deals_No_MRLP_MYA_Use) VALUES (%s);" % values
				
				try:
					c.execute(sql)
					conn.commit()
				except IntegrityError:
					print "Duplicate dates in table (Ignoring second date)"
				
				row_count += 1
			
			tbl_upload_date = date('%Y-%d-%m')
			
			# insert file name (and info) into table uploaded so that panda
			# won't try to upload it again
			c.execute("INSERT INTO uploaded (file, upload_date, row_count) VALUES ('%s', '%s', %d);" \
				% (file_name, tbl_upload_date, row_count))
			conn.commit()
		
	# glob through csvs
	for file in glob(join(path, 'files', 'extracts', '*.csv')):
		# get file name from full path
		file_name = file.split('\\')[-1]
		
		# don't prompt for previously uploaded files
		if file_name not in uploaded_files:
			usr_upload.append(file_name)
Code example #53
File: main.py Project: jacobcvt12/panda
def daily_upload(file):
	'''parses and uploads text from csv to database'''
	# open text file
	f = open(join(path, 'files', 'extracts', file), 'rb')
	
	# set read to False to skip first few rows
	# initialize to_db to append to
	read = False
	to_db = []
		
	for row in reader(f):
		if not read:
			# check if this is the data header
			try:
				if row[0] == 'Date' and 'instances' in row[2].lower():
					read = True
			except IndexError:
				# index error on an empty row or row with less than 3 items
				pass
		
		else:
			# check if there are bookings, impressions,
			# or clicks before proceeding
			if sum([int(n) for n in row[2:6]]):
				# perform HTML clean up
				row[1] = row[1].replace('%3A', ':').replace('%7C', '|').replace('%3D', '=').replace('%20', '')
				
				# clean up date
				excel_date = datetime(1899, 12, 31)
				date_dt = datetime.strptime(row[0], '%b %d, %Y')
				
				if datetime(2014, 3, 15) <= date_dt <= datetime(2014, 4, 17):
					row[1] = sub(":personalizedPlacement(-[0-9]{1,}){3}", 
					            "", row[1])
				
				row[0] = date_dt.strftime('%Y-%m-%d')
				excel_serial = str((date_dt - excel_date).days)
				
				# emulate function of column A in excel workbook 
				# by combining (modified) date and link
				row = [excel_serial + row[1]] + row
				
				# prk parsing
				prk = row[2]
				if prk == '' or prk == 'None':
					# default the parsed fields so later references don't fail
					page = segment = page_section = placement = ''
					row = row + [''] * 6
				else:					
					prk_split = prk.split('|')
					page = prk_split[0]
					segment = prk_split[1]
					page_section = \
						prk_split[2].split('pageSection=')[1].split(':')[0]
					placement = prk_split[2].split('placement=')[1]
					page_section_placement = prk_split[2]
					content_id = prk_split[3]
			
					row = row + [page, segment, page_section, placement, \
								 page_section_placement, content_id]									
				
				# add categories and groupings
				# page_group_dict imported from dicts.py
				page_group = "Other"
				
				for key in page_group_dict:
					if key in page.lower():
						page_group = page_group_dict[key]
						break
						
				if 'ritz' in page:
					rewards_program = "RCR"
				else:
					rewards_program = "MR"
				
				# assign rule_group based on segment
				rule_group = assign_rule(segment)
				
				# use the row's own date; excel_date is the fixed epoch constant
				control_group_key = str(date_dt.year) + '_' + \
					str(date_dt.month) + '_' + str(date_dt.day) + '_' + \
					page_section + '_' + placement
					
				# site_dict imported from dicts.py
					
				site = 'Other'
				
				for key in site_dict:
					if key in page:
						site = site_dict[key]
						break
						
				if "ControlGroup" in segment:
					department = "TBD"
				elif segment in ('', 'None') or '_' not in segment:
					department = 'Unavailable'
				else:
					department = segment.split('_')[0]
					
				row = row + [page_group, rewards_program, rule_group, 
					control_group_key, site, department]
										
				# append data to to_db
				to_db.append(row)
		
	# close text file
	f.close()
	
	# insert rows of csv into table mrlp
	str_SQL = 'INSERT INTO mrlp (ComboKey, Date, Personalization_Rule_Key, Instances, Link_Impressions, Bookings, Revenue, Bookings_Participation, Revenue_Participation, Rewards_Enrollments, Credit_Card_Apps, Hotel_Availability_Search_Visits, Promo_Registrations, Page, Segment, PageSection, Placement, PageSectionPlacement, ContentID, Page_Group, Rewards_Program, Rule_Group, ControlGroupKey, Site, Department) VALUES ' + str(tuple(['?'] * 25)).replace("'", '') + ';'
	c.executemany(str_SQL, to_db)
	conn.commit()
	
	row_count = len(to_db)
	tbl_upload_date = date('%Y-%d-%m')
	
	# insert file name (and info) into table uploaded so that panda
	# won't try to upload it again
	c.execute("INSERT INTO uploaded (file, upload_date, row_count) VALUES ('%s', '%s', %d);" \
		% (file, tbl_upload_date, row_count))
	conn.commit()
Code example #54
File: main.py Project: aviatorblue62/sFPI
 def saveProject(self):
     save_image = './images/' + date('%Y-%m-%d_%H%M%S') + '.gif'
     call(['cp','Graph_Update.gif',save_image])
Code example #55
File: whisperbot.py Project: koo5/puzzled-tree
def say(message):
	data = "%s : %s" % (charactername, message)
	mapserv.sendall("\x8c\0%s%s" % (struct.pack("<H", len(data)+4), data))
	file.write("[" + date('%H:%M:%S') + "] " + "Me: " + message + "\n")
Code example #56
File: tweet_anal.py Project: andikan/twitter-ts
        if str(create_time.date()) in time_tweet:
            time_tweet[str(create_time.date())] += 1
        else:
            time_tweet[str(create_time.date())] = 1
        offset += 1

start_time = sorted(time_tweet.iterkeys())  #sort time_tweet key
print "collect tweet start at : " + str(start_time[0])
print "collect tweet end at : " + str(start_time[-1])

end_date = str((parse(start_time[-1])+datetime.timedelta(days=1)).date())

current_date = start_time[0]
while current_date != end_date:
    time = parse(current_date)
    if str(time.date()) not in time_tweet:
        time_tweet[str(time.date())] = 0
    print "time : %s  =>  value : %d" %(str(time.date()), time_tweet[str(time.date())])
    data = {"date":  str(time.date()),
            "count": time_tweet[str(time.date())] }
    date_tweet_collection.insert(data)
    
    current_date = str((time + datetime.timedelta(days=1)).date())
Code example #57
def verbose(message):
    if options.verbose:
        print "%s: %s" % (date('%Y-%m-%d %H:%M:%S'), message)
Code example #58
File: whisperbot.py Project: koo5/puzzled-tree
def main():
	global mapserv
	login = socket.socket()
	login.connect((server, port))
	print("login connected")
	login.sendall("\x64\0\0\0\0\0%s%s\0" % (account.ljust(24, '\0'), password.ljust(24, '\0')))

	pb = PacketBuffer()
	id1 = accid = id2 = sex = 0
	charip = ""
	charport = 0
	while True:
		data = login.recv(1500)
		if not data:
			break
		pb.feed(data)
		for packet in pb:
			if packet.startswith("\x69\0"): # login succeeded
				id1, accid, id2 = struct.unpack("<LLL", packet[4:16])
				sex = ord(packet[46])
				packet = packet[47:]
				charip = parse_ip(packet[:4])
				charport = struct.unpack("<H", packet[4:6])[0]
				login.close()
				break
		if charip:
			break

	assert charport

	char = socket.socket()
	char.connect((charip, charport))
	print("char connected")
	char.sendall("\x65\0%s\0\0%s" % (struct.pack("<LLL", accid, id1, id2), chr(sex)))
	char.recv(4)

	pb = PacketBuffer()
	mapip = ""
	mapport = 0
	charid = 0
	while True:
		data = char.recv(1500)
		if not data:
			break
		pb.feed(data)
		for packet in pb:
			if packet.startswith("\x6b\0"):
				char.sendall("\x66\0%s" % chr(character))
			elif packet.startswith("\x71\0"):
				charid = struct.unpack("<L", packet[2:6])[0]
				mapip = parse_ip(packet[22:26])
				mapport = struct.unpack("<H", packet[26:28])[0]
				char.close()
				break
		if mapip:
			break

	assert mapport

	mapserv = socket.socket()
	mapserv.connect((mapip, mapport))
	print("map connected")
	mapserv.sendall("\x72\0%s" % struct.pack("<LLLLB", accid, charid, id1, id2, sex))
	mapserv.recv(4)

	mapserv.setblocking(0)
	mapserv.settimeout(0.1)

	pb = PacketBuffer()
	gotresponse = set()

	while True:
		si,so,se = select.select([sys.stdin],[],[], 0.1)
		for s in si:
			if s == sys.stdin:
				message = sys.stdin.readline()[:-1]
				if len(message) > 0:
					if message[0] == '/':
						if len(message) > 1:
							if (message[1] == 'q') or (message[1] == 'w'):
								nick = string.split(message)[1]
								text = string.join(string.split(message)[2:])
								mapserv.sendall(whisper(nick, text))
								break
							elif ord(message[1]) in range(ord('1'), ord('9')):
								smile(ord(message[1]) - ord('0'))
								break
				say(message)

		try:
			data = mapserv.recv(1500)
			if not data:
				break  # exit

			pb.feed(data)
			for packet in pb:
#				print [hex(ord(x)) for x in packet]
				if packet.startswith("\x73\0"): # connected
					mapserv.sendall("\x7d\0") # map loaded
					if sit:
						mapserv.sendall("\x89\0\0\0\0\0\x02") # sit
					smile(2)

				elif packet.startswith("\xc0\0"): #smiley
					if be_confused_tree:
						if packet[6] == '\2':
#						if random.randint(0,1) == 1:
							print "o_0"
							time.sleep(0.5)
							smile(2)
#						else:
#							print "pffft"

				elif packet.startswith("\x8e\0"): # server speech
					message = packet[4:]
					print "[" + date('%H:%M:%S') + "] " + message
					if "automaticly banned for spam" in message:
						time.sleep(3)
				elif packet.startswith("\x8d\0"): # char speech
					message = re.sub(r'(##[0-9])',color_replacement_regex,packet[8:-1])
					print "[" + date('%H:%M:%S') + "] " + message
					if len(message) > console_width:
						print ""
					file.write("[" + date('%H:%M:%S') + "] " + message + "\n")
					#file.flush()
					if be_confused_tree:
						(nick, msg) = message.split(" : ")
						msg = msg.lower()
						if msg.startswith(charactername.lower()):
							if "shut up" in msg:
								say("i can't do that yet:(")
							if "help" in msg or "daddy" in msg or "mommy" in msg:
								say("[@@https://github.com/koo5/puzzled-tree/blob/master/whisperbot.py |https://github.com/koo5/puzzled-tree/blob/master/whisperbot.py@@]")
					
					
					time.sleep(0.1)

				elif packet.startswith("\x97\0"):#a whisper
					nick = packet[4:28].rstrip("\0")
					message = packet[28:]
					file.write("[" + date('%H:%M:%S') + "] " + "!!! " + nick + ": " + message + "\n")
					print "[" + date('%H:%M:%S') + "] " + "!!! " + nick + ": " + message


		except socket.timeout:
			pass
Code example #59
File: pysel_core.py Project: ezeakeal/pySel
DIR_CONF           = '%s/conf'                % (DIR_APP)
DIR_LOG            = '%s/output/log'          % (DIR_APP_ROOT)
DIR_RAW            = '%s/output/raw'          % (DIR_APP_ROOT)
DIR_REPORT         = '%s/output/report'       % (DIR_APP_ROOT)
DIR_RESPONSE       = '%s/output/response'     % (DIR_APP_ROOT)

# File Extensions
EXT_TEST           = 'json'
EXT_RAW            = 'raw'
EXT_LOG            = 'log'
EXT_REPORT         = 'rpt'
EXT_RESPONSE       = 'txt'
BASH_EXEC          = '/bin/bash'

FILE_LOG           = 'pysel_%s' % (date("%Y%m%dT%H%M%S"))

# Customize the HTTP Request Headers
USER_AGENT_STR     = 'FeedHenry PySel Automated Tester 0.2'
CONNECTION_TIMEOUT = 30

# Customize the Implicit waits for elements
IMPLICIT_WAIT      = 30

# Create global counters
COUNT_ERROR        = 0
COUNT_WARNING      = 0
COUNT_PIC          = 0

# Define our own severity levels
PYSEL_OK           = 0                    # OK