Code Example #1
File: iplog.py  Project: jpcaldwell30/cloudmyip
def main():
	relevant1, numLines = getOldInfo(FILENAME) #get old info from file
	relevant2, myString = getNewInfo() #get current info

	if(MANUAL == "TRUE"):  #check if the user wants to run manually
		print("'MANUAL' has been set to TRUE so the program will run manually:")
		if (numLines > NUMLOGS): #check if the number of lines in the file is greater than the number of logs to keep. If it is, delete the file on google drive and locally
			ident = getDriveInfo() 
			os.remove(FILENAME)
			DRIVE.files().delete(fileId = ident).execute()
		file = open(FILENAME, 'a+') #create a new local log file or update the existing one if it exists
		file.write(myString + "\n") #write the current info in 'my string' to the file
		print(myString)	#print current info
		print("Saving above info...")
		file.close() #close the file
		driveManip() #run drive manip to either upload new file or update the existing one in drive
		print("-----------------------------------------------------------------------------------------") #print spacer
		option = raw_input("Operation completed, run again? Type 'y' for yes or anything else to quit: ") #ask if user wants to continue manually running.
		if (option != "y"):
			quit() #if they don't want to continue manually running quit
	else:
		if (relevant1 != relevant2): #default operation with 'MANUAL' set to its default of "FALSE". Only update the logs if the public IP has changed
			if (numLines > NUMLOGS): #check if the number of lines in the file is greater than the number of logs to keep. If it is, delete the file on google drive and locally
				ident = getDriveInfo()
				os.remove(FILENAME)
				DRIVE.files().delete(fileId = ident).execute()
			file = open(FILENAME, 'a+') #create a new local log file or update the existing one if it exists
			file.write(myString + "\n") #write the current info in 'my string' to the file
			print(myString)	#print current info
			print("Saving above info...")
			file.close() #close the file
			driveManip() #run drive manip to either upload new file or update the existing one in drive
			print("-----------------------------------------------------------------------------------------")
Code Example #2
def update_subject_content(content_subject):
    file = open("last_message.txt", "w")
    file.close()
    time.sleep(2)
    file = open("last_message.txt", "w")
    file.write(content_subject)
    file.close()
Code Example #3
def fetch_and_dump(ga_service, ga_id):
    # Google Analytics metrics we want
    ga_metrics = 'ga:users,\
                ga:newusers,\
                ga:sessions,\
                ga:bounces,\
                ga:sessionDuration,\
                ga:hits,\
                ga:pageviews'

    dims_date = ',ga:date,ga:hour'
    dims_date1 = ',ga:date'

    # Sets of dimensions to look at
    ga_dims_geo = 'ga:country,\
                 ga:region,\
                 ga:city,\
                 ga:continent,\
                 ga:language' + dims_date

    data_geo = ga_service.data().ga().get(ids='ga:' + ga_id,
                                          start_date='yesterday',
                                          end_date='today',
                                          max_results=10000,
                                          metrics=ga_metrics,
                                          dimensions=ga_dims_geo).execute()
    file = open(
        'C:\\Users\\takahiro.honda\\Desktop\\Insights Document\\Python\\GA API\\Data\\google_analytics_geo.json',
        'w')
    file.write(json.dumps(data_geo, indent=1))
    file.close()
    print("Export successfully completed!")
Code Example #4
File: actions.py  Project: zhaohc10/airflow2
def drive_ignore(unttrack_file, l):
    cwd = os.getcwd()
    drive_ignore_path = os.path.join(cwd, '.driveignore')
    if(len(unttrack_file) != 0):
        try:
            file = open(drive_ignore_path, 'r')
            files = file.readlines()
            file.close()
        except:
            files = []
        file = open(drive_ignore_path, 'a+')
        for f in unttrack_file:
            f = f + "\n"
            file_path = os.path.join(cwd, f[:-1])
            if os.path.exists(file_path):
                if not (f in files):
                    file.write(f)
            else:
                click.secho(f[:-1] + " doesn't exist in " + cwd, fg="red")
        file.close()

    if l:
        click.secho("listing untracked files....", fg="magenta")
        utils.save_history([{"-l": ["True"]}, " ", cwd])
        if os.path.isfile(drive_ignore_path):
            file = open(drive_ignore_path, 'r')
            untracked_files = file.read()
            click.secho(untracked_files)
            file.close()
        else:
            click.secho(".driveignore file doesn't exist in " + cwd, fg="red")
            sys.exit(0)
    else:
        utils.save_history([{"-l": [None]}, " ", cwd])
Code Example #5
    def download_file(self, file_id, name, data_type='csv'):
        ### Download file in the prescribed format
        if data_type == 'csv':
            request = self.drive_service.files().get_media(fileId=file_id)
            fh = io.BytesIO()
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while done is False:
                status, done = downloader.next_chunk()

            ### Decode file to format
            text = fh.getvalue().decode("utf-8").split('\n')
            with open('data/' + str(name), 'w') as fp:
                ### Write CSV
                writer = csv.writer(fp, delimiter=',')
                writer.writerow(text[0].split(','))  # write header
                for row in text[1:]:
                    writer.writerow(row.split(','))

        if data_type == 'instructions':
            request = self.drive_service.files().export_media(
                fileId=file_id, mimeType='text/html')
            fh = io.BytesIO()
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while done is False:
                status, done = downloader.next_chunk()

            with open('data/' + str(name), "w") as file:
                file.write(fh.getvalue().decode("utf-8"))

        return 200
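
A minimal usage sketch for the method above, assuming it lives on a wrapper class (called DriveClient here purely for illustration) that holds an authenticated drive_service attribute, and that a local data/ directory exists; the file IDs are placeholders:

# Hypothetical wrapper instance; the class name and constructor are assumptions.
client = DriveClient()
client.download_file('PLACEHOLDER_CSV_FILE_ID', 'results.csv', data_type='csv')
client.download_file('PLACEHOLDER_DOC_FILE_ID', 'instructions.html', data_type='instructions')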
Code Example #6
def ADMIN_write_File (text_File):
    UpdateReadfile()
    file = open("users.txt", "a")
    user_Input = text_File.get()
    file.write("ADMIN XXX : "+user_Input+ '\n')  
    the_input1.delete(0, END)
    file.close()
Code Example #7
def save_refresh_token(oauth_tokens):
    ''' 
    Stores a refresh token locally. Be sure to save your refresh token securely.
    '''
    with open("refresh.txt","w+") as file:
        file.write(oauth_tokens.refresh_token)
        file.close()
    return None
Code Example #8
def read_serial():
    # decode the serial bytes directly instead of stripping characters from
    # str(bytes), which would also delete legitimate 'b', 'n' and 'r' characters
    line = ser.readline().decode(errors="ignore").strip()
    file = open("temp.txt", "w")
    file.write(line)
    file.close()
    ser.close()
Code Example #9
File: server.py  Project: Laszer271/ScamTrapper
def save_content_to_html(df, filepath):
    if not os.path.exists(filepath):
        os.makedirs(filepath)
    if filepath[-1] != '/':
        filepath += '/'
        
    for index, row in df.iterrows():
        with open(filepath + str(index) + '.html', 'w', encoding='utf-8') as file:
            file.write(row['Content'])
Code Example #10
def update_hashes(txt):
    try:
        file = open(hashfile, "w")
        for line in txt:
            file.write(line)
        file.close()
    except Exception as e:
        if debug:
            log("|update_hashes() error: " + str(e))
            pass
Code Example #11
def update_hashes(txt):
    try:
        file = open(hashfile, "w")
        for line in txt:
            file.write(line)
        file.close()
    except Exception as e:
        if debug:
            log("|update_hashes() error: " + str(e))
            pass
Code Example #12
def retrieve():
    '''calling sheets api and writing to text file.'''
    request = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
                                                  range=RANGE_,
                                                  majorDimension='ROWS')
    response = request.execute()
    locationEntries = response["values"]
    for entry in locationEntries[-5:-1]:  # data cleanup for txt file writing
        stringified = entry[0] + " " + entry[1] + "\n"
        file = open("test.txt", "a")
        file.write(stringified)
        file.close()
Code Example #13
def log_hashes(txt):
    try:
        now = datetime.datetime.now()
        time = now.strftime("%d-%m-%Y %H:%M:%S")
        file = open(hashfile, "a")
        txt = str(time + "|" + str(txt).encode("cp1254") + "\n")
        file.write(txt)
        file.close()
    except Exception as e:
        if debug:
            log("|log_hashes() error: " + str(e))
            pass
Code Example #14
def log_hashes(txt):
    try:
        now = datetime.datetime.now()
        time = now.strftime("%d-%m-%Y %H:%M:%S")
        file = open(hashfile, "a")
        txt = str(time + "|" + str(txt).encode("cp1254") + "\n")
        file.write(txt)
        file.close()
    except Exception as e:
        if debug:
            log("|log_hashes() error: " + str(e))
            pass
Code Example #15
File: GoogleSheets.py  Project: austinminich/BOOM-Bot
    async def spreadsheet(self, ctx, spreadsheet_link=None):
        """Connects the bot to the spreadsheet and stores it into memory so you don't have to do it everytime."""

        #Just stores spreadsheet_id into txt file and connects
        if (spreadsheet_link == None):  #Wasn't given
            await ctx.send(
                "You never gave a spreadsheet to connect to. Current connected spreadsheet is: {}"
                .format(self.master_ss))
        else:  #spreadsheet
            try:
                #Grab spreadsheet_id; str.strip() removes a set of characters,
                #not a prefix, so split on '/d/' to isolate the id reliably
                spreadsheet_link = spreadsheet_link.split('/d/')[-1]
                spreadsheet_link = spreadsheet_link.split('/')

                if (spreadsheet_link[0] !=
                        self.master_ss):  #Different/new Spreadsheet
                    #open file and write new spreadsheet_id to file
                    file = open("data/spreadsheet.txt", "w")
                    file.write(spreadsheet_link[0])
                    self.master_ss = spreadsheet_link[0]
                    file.close()
                    print("Saved new spreadsheet as master.")
                    await ctx.send(
                        "Entered a new spreadsheet. The given spreadsheet is now the saved spreadsheet."
                    )

                else:  #Same spreadsheet
                    await ctx.send(
                        "Entered previous spreadsheet. Nothing changed.")

                #Check to see if sheet is created already
                if (self.DoesSheetExist()):  #Returns true/false
                    await ctx.send(
                        "Sheet exists in spreadsheet. Not creating new sheet.")
                    print(
                        "Sheet exists in spreadsheet. Not creating new sheet.")
                    return  #Don't create another sheet if the sheet already exists
                self.CreateNewSheet(
                    "BOOM-Bot Attendance"
                )  #Set to have sheet name be "BOOM-Bot Attendance"
                await ctx.send("Created new sheet in spreadsheet.")
                print("Created new sheet in spreadsheet.")
                self.InitializeSheet("BOOM-Bot Attendance"
                                     )  #Set up the sheet for taking attendance
                await ctx.send("Initialized new sheet in spreadsheet.")
                print("Initialized new sheet in spreadsheet.")
            except:
                await ctx.send("Failed to create new sheet in spreadsheet.")
                print("Failed to created new sheet in spreadsheet.")
Code Example #16
    def download_file(self, filename):
        """Create a file with the content downloaded from the user's Google Drive account;
        in case the file is not found, write the message to stdout."""
        try:
            request = self.__drive.files().get_media(
                fileId=self.search(filename))
        except Exception:
            print("The file wasn't found in Google Drive.")
            return
        fh = io.BytesIO()
        downloader = MediaIoBaseDownload(fh, request)
        download_succeeded = False
        while not download_succeeded:
            # next_chunk() returns (status, done); keep only the completion flag
            _, download_succeeded = downloader.next_chunk()
        with open(filename, 'wb') as file:
            file.write(fh.getvalue())
Code Example #17
    def appendBinaryToFile(self, data, filename):
        if not self.ifExist(filename):
            self.createFile(filename, fileMimeExtractor(filename))

        history = self.basicDownloadToBinary(filename)
        file = open('google_api/drive_api/appendTEMP.txt', 'wb')
        file.write(history)
        file.close()
        file = open('google_api/drive_api/appendTEMP.txt', 'ab')
        file.write(data)
        file.close()
        mimeType = self.getFileMimeType(filename)
        self.deleteFile(filename)
        self.basicUpload(filename, 'google_api/drive_api/appendTEMP.txt')
        os.remove('google_api/drive_api/appendTEMP.txt')
Code Example #18
def check_status():
    now = datetime.datetime.utcnow().isoformat() + 'Z'
    service = credentials()
    events_result = service.events().list(calendarId='primary',
                                          timeMin=now,
                                          maxResults=10,
                                          singleEvents=True,
                                          orderBy='startTime').execute()
    x = events_result.get('items', [])
    if len(x) == 0:
        file = open("temp.txt", "w")
        file.write("livre")
        file.close()
        return 'livre'
    else:
        events = x[0]

    now2 = datetime.datetime.now().isoformat()
    start = events['start'].get('dateTime', events['start'].get('date'))
    m_now = getmonth(now2)
    m_start = getmonth(start)
    d_now = getday(now2)
    d_start = getday(start)
    h_now = gettime(now2)
    h_start = gettime(start)
    if m_now == m_start and d_now == d_start and (h_start - h_now) < 1 and (
            h_start - h_now) > 0:
        file = open("temp.txt", "w")
        file.write("livre")
        file.close()
        return "livre"
    elif m_now == m_start and d_now == d_start and (h_start - h_now) > 1:
        file = open("temp.txt", "w")
        file.write("livre")
        file.close()
        return "livre"
    elif m_now == m_start and d_now == d_start and (h_start - h_now) < 0:
        if (h_start - h_now) > -0.25:
            file = open("temp.txt", "w")
            file.write("timer")
            file.close()
            return "timer"
        else:
            file = open("temp.txt", "w")
            file.write("ocupado")
            file.close()
            return "ocupado"
Code Example #19
def download_file(service, drive_file):
    download_url = drive_file['exportLinks']['text/csv']
    logger.info('DownloadUrl: ' + download_url)
    if download_url:
        resp, content = service._http.request(download_url)
        if resp.status == 200:
            logger.info('Status: %s' % resp)
            title = drive_file.get('title')
            path = './'+title+'.csv'
            file = open(path, 'wb')
            file.write(content)
            file.close()
        else:
            logger.info('An error occurred: %s' % resp)
            return None
    else:
        # The file doesn't have any content stored on Drive.
        return None
Code Example #21
def write_log(module, level, message):
    appDir = logDirectory
    appDir += "/Logs/"
    if not os.path.exists(appDir):
        os.mkdir(appDir)
    t = datetime.datetime.now()
    logName = appDir + t.strftime("Log_" + "%d_%m_%y_.csv")
    dateString = t.strftime("%d/%m/%y")
    timeString = t.strftime("%H:%M:%S")
    csvString = dateString + "," + timeString + "," + module + "," + str(level) + "," + message + "\n"
    if os.path.exists(logName):
        file = open(logName, "a")
    else:
        file = open(logName, "w")
        file.write("Date,Time,Module,Level,Message\n")
    file.write(csvString)
    file.close()
    print("[" + module + "] " + str(level) + ": " + message)
Code Example #22
def export_file(fileId, save_file=False):
    SCOPES = 'https://www.googleapis.com/auth/drive'

    from oauth2client import file, client, tools

    store = file.Storage('storage.json')
    creds = store.get()
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets('client_id.json', SCOPES)
        creds = tools.run_flow(flow, store)
    DRIVE = discovery.build('drive', 'v3', http=creds.authorize(Http()))

    content = DRIVE.files().export(fileId=fileId,
                                   mimeType='text/html').execute()

    if save_file:
        with open('export.html', 'wb') as file:
            file.write(content)
    return content
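
A short usage sketch for export_file, assuming client_id.json and storage.json are available in the working directory; the file ID below is a placeholder, not a real Drive ID:

# Export a Google Doc as HTML and also save a local copy (export.html).
html_bytes = export_file('PLACEHOLDER_FILE_ID', save_file=True)
print('exported %d bytes to export.html' % len(html_bytes))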
Code Example #23
def get_email():
    service = credentials()
    now = datetime.datetime.utcnow().isoformat() + 'Z'
    events_result = service.events().list(calendarId='primary',
                                          timeMin=now,
                                          maxResults=10,
                                          singleEvents=True,
                                          orderBy='startTime').execute()
    x = events_result.get('items', [])
    if len(x) == 0:
        print("Não há eventos")
        return 0
    else:
        events = x[0]
    email = events['summary']
    file = open("temp.txt", "w")
    file.write(email)
    file.close()
Code Example #24
def extract_data_from_email_and_save_to_csv():
    msg_list = list_messages(service, user_id, query)
    j = 0
    data = []

    for i in msg_list:
        j += 1
        print(str(j) + " extracted " + str(i))

        msg = get_message_data(service, user_id, i['id'])

        if msg['rate'] is not None:
            data.append(msg)

    file = open('remittance-rate.json', "w")
    file.write(json.dumps(data))
    file.close()

    print("File saved")
Code Example #25
File: google_apis.py  Project: vospader13/VBBPortal
    def updatePersonalizedHTML(templatePath, personalizedPath, extraData):
        """Get HTML with the extraData filled in where specified.
        - Use BeautifulSoup to find and replace the placeholder values with the proper
          user-specific info.
        - Use 'with' to write the BeautifulSoup string into a new file - the personalized
          version of the original templatePath. This personalized version will be sent out
          in the email and will be rewritten every time the function is called.
        """
        with open(templatePath, 'r', encoding="utf8") as f:
            template = f.read()
        soup = BeautifulSoup(template, features="html.parser")
        if extraData is not None:
            for key in extraData:
                target = soup.find_all(text=re.compile(r'%s' % key))
                for v in target:
                    v.replace_with(v.replace('%s' % key, extraData[key]))
            # now the soup string has the proper values
        with open(personalizedPath, "w") as file:
            file.write(str(soup))
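
A usage sketch for the helper above, assuming it is callable as a plain or static function; the paths and placeholder token names are illustrative assumptions - the function simply replaces whatever keys appear in extraData with their values:

# Hypothetical paths and placeholder tokens, chosen for illustration only.
updatePersonalizedHTML(
    'templates/welcome_template.html',   # template containing NAME_TOKEN / DATE_TOKEN text
    'output/welcome_personalized.html',  # personalized copy, rewritten on every call
    {'NAME_TOKEN': 'Alice', 'DATE_TOKEN': 'May 3'})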
Code Example #26
def print_response(response, appname):
    appname = appname.replace('/', '_')

    file = open('./SaveExtract/' + appname + '.csv', 'w')
    file.write('PagePathLevel1;Date;Sessions\n')
    print('processing ' + appname + ' . . . ')
    for report in response.get('reports', []):
        columnHeader = report.get('columnHeader', {})
        dimensionHeaders = columnHeader.get('dimensions', [])
        metricHeaders = columnHeader.get('metricHeader',
                                         {}).get('metricHeaderEntries', [])
        rows = report.get('data', {}).get('rows', [])
        print(report.get('nextPageToken'))
        for row in rows:
            lst = []
            dimensions = row.get('dimensions', [])
            dateRangeValues = row.get('metrics', [])
            for header, dimension in zip(dimensionHeaders, dimensions):
                lst.append(dimension)

            for i, values in enumerate(dateRangeValues):
                for metricHeader, value in zip(metricHeaders,
                                               values.get('values')):
                    lst.append(value)
                lst[1] = datetime.datetime.strptime(lst[1], '%Y%m%d').strftime(
                    '%Y-%m-%d 00:00:00')  #lst[1]+' 00:00:00'
                file.write(';'.join(lst).encode('utf-8') + '\n')
    while report.get('nextPageToken') is not None:
        try:
            analytics = initialize_analyticsreporting()
            response = get_report_iteration(analytics,
                                            report.get('nextPageToken'))
            for report in response.get('reports', []):

                columnHeader = report.get('columnHeader', {})
                dimensionHeaders = columnHeader.get('dimensions', [])
                metricHeaders = columnHeader.get('metricHeader', {}).get(
                    'metricHeaderEntries', [])
                rows = report.get('data', {}).get('rows', [])
                print('while', report.get('nextPageToken'))
                for row in rows:
                    lst = []
                    dimensions = row.get('dimensions', [])
                    dateRangeValues = row.get('metrics', [])

                    for header, dimension in zip(dimensionHeaders, dimensions):
                        lst.append(dimension)

                    for i, values in enumerate(dateRangeValues):
                        for metricHeader, value in zip(metricHeaders,
                                                       values.get('values')):
                            lst.append(value)
                        lst[1] = datetime.datetime.strptime(
                            lst[1], '%Y%m%d').strftime('%Y-%m-%d 00:00:00')
                        file.write(';'.join(lst).encode('utf-8') + '\n')

        except:
            time.sleep(15)
    print(appname + ' processed')
    file.close()
Code Example #27
def data_entry(fname):
    ins = []
    file = open(fname, 'a')
    while True:
        root = ''

        cat = input("\n\nEnter Category - Agent, Skill, Dataset:    ")
        if cat == 'a':
            root += 'agent'
        elif cat == 's':
            root += 'skill'
        elif cat == 'd':
            root += 'dataset'
        elif cat == 'q':
            break
        else:
            continue

        for i in range(1, 50):
            t = input("Enter root " + str(i) + ":  ").replace(' ', '').lower()
            if t == '': break
            root += '.' + t

        print('root: ' + root)

        for i in range(1, 50):
            key = input("Enter key " + str(i) + ":  ").replace(' ', '').lower()
            if key == '': break
            val = input("\nEnter value for " + root + '.' + key + ': ')
            if val == '':
                val = ' '.join([x.capitalize() for x in key.split('_')])
            j = {"key": root + '.' + key, "value": val}
            ins.append(j)
            json.dump(j, file, indent=4)
            file.write(',\n')
            print('\n', j)

    file.close()
    print(ins)
Code Example #28
    def record_audio(self):
        if args.samplerate is None:
            device_info = sd.query_devices(args.device, 'input')
            args.samplerate = int(device_info['default_samplerate'])
        newID = str(ID)
        if args.filename is None:
            args.filename = tempfile.mktemp(prefix=newID,
                                            suffix='.wav',
                                            dir='')
        with sf.SoundFile(args.filename,
                          mode='x',
                          samplerate=args.samplerate,
                          channels=args.channels,
                          subtype=args.subtype) as file:
            with sd.InputStream(samplerate=args.samplerate,
                                device=args.device,
                                channels=args.channels,
                                callback=callback):
                while True:
                    global recordFlag
                    file.write(q.get())
                    if not recordFlag:
                        break
Code Example #29
def add_course():

    try:
        file = open("courses_info.json", "r")
        json_data = json.load(file)

    except FileNotFoundError:
        file = open("courses_info.json", "w+")
        json_data = {"user": "******"}
        file.write(str(json_data))

    course_name = input("Enter the course code: ")
    instructor = input("Enter the instructor name: ")
    activities = {}
    course_data = {
        course_name: {
            "instructor": instructor,
            "activities": activities
        }
    }

    json_data.update(course_data)
    with open("courses_info.json", 'w') as file:
        json.dump(json_data, file)
Code Example #30
def get_time_next():
    now = datetime.datetime.utcnow().isoformat() + 'Z'
    service = credentials()
    events_result = service.events().list(calendarId='primary',
                                          timeMin=now,
                                          maxResults=10,
                                          singleEvents=True,
                                          orderBy='startTime').execute()
    x = events_result.get('items', [])
    if len(x) == 0:
        file = open("temp.txt", "w")
        file.write("Livre")
        file.close()
        return 0
    else:
        events = x[0]

    start = events['start'].get('dateTime', events['start'].get('date'))
    m_start = getmonth(start)
    d_start = getday(start)
    h_start = gettime2(start)
    file = open("temp.txt", "w")
    file.write("{}/{}/->{}".format(d_start, m_start, h_start))
    file.close()
Code Example #31
def sort_json(fname):
    print('\nSorting jsons in taxonomy.json...')
    txs = json.load(open(fname))['taxonomies']
    k = list({(j.get('key'), j.get('value')) for j in txs})
    k.sort(key=lambda x: x[0])
    open(fname, 'w').close()  # truncate the file before rewriting it
    file = open(fname, 'a')
    file.write('{\n  "taxonomies": [')
    for i in range(0, len(k)):
        j = {"key": k[i][0], "value": k[i][1]}
        json.dump(j, file, indent=8)
        if i != len(k) - 1: file.write(',\n')
    file.write('\n  ]\n}')
    print('Done\n')
Code Example #32
def print_response(response, appname):
    """Parses and prints the Analytics Reporting API V4 response"""
    appname = appname.replace('/', '_')
    #os.system('rm -rf '+appname+'.csv')

    now = datetime.datetime.now()

    file = open(
        '/home/erowz/analytics_Script/SaveExtract/' + now.strftime("%Y%m%d") +
        "_" + appname + '.csv', 'w')
    file.write('page;sessions\n')
    print('processing ' + appname + '. . .')
    for report in response.get('reports', []):

        columnHeader = report.get('columnHeader', {})
        dimensionHeaders = columnHeader.get('dimensions', [])
        metricHeaders = columnHeader.get('metricHeader',
                                         {}).get('metricHeaderEntries', [])
        rows = report.get('data', {}).get('rows', [])
        print(report.get('nextPageToken'))
        for row in rows:
            lst = []
            dimensions = row.get('dimensions', [])
            dateRangeValues = row.get('metrics', [])

            for header, dimension in zip(dimensionHeaders, dimensions):
                #print header + ': ' + dimension
                lst.append(dimension)

            for i, values in enumerate(dateRangeValues):
                #print 'Date range (' + str(i) + ')'
                for metricHeader, value in zip(metricHeaders,
                                               values.get('values')):
                    #print metricHeader.get('name') + ': ' + value
                    lst.append(value)
            # skip page paths that contain query-string characters
            if '?' in lst[0] or '&' in lst[0] or '=' in lst[0] or ',' in lst[0]:
                continue
            if int(lst[1]) >= 2:

                file.write(';'.join(lst).encode('utf-8') + '\n')

    while report.get('nextPageToken') is not None:
        try:
            analytics = initialize_analyticsreporting()
            response = get_report_iteration(analytics,
                                            report.get('nextPageToken'))
            for report in response.get('reports', []):

                columnHeader = report.get('columnHeader', {})
                dimensionHeaders = columnHeader.get('dimensions', [])
                metricHeaders = columnHeader.get('metricHeader', {}).get(
                    'metricHeaderEntries', [])
                rows = report.get('data', {}).get('rows', [])
                print('while', report.get('nextPageToken'))
                for row in rows:
                    lst = []
                    dimensions = row.get('dimensions', [])
                    dateRangeValues = row.get('metrics', [])

                    for header, dimension in zip(dimensionHeaders, dimensions):
                        #print header + ': ' + dimension
                        lst.append(dimension)

                    for i, values in enumerate(dateRangeValues):
                        #print 'Date range (' + str(i) + ')'
                        for metricHeader, value in zip(metricHeaders,
                                                       values.get('values')):
                            #print metricHeader.get('name') + ': ' + value
                            lst.append(value)
                    # skip page paths that contain query-string characters
                    if '?' in lst[0] or '&' in lst[0] or '=' in lst[0] or ',' in lst[0]:
                        continue
                    if int(lst[1]) >= 2:
                        file.write(';'.join(lst).encode('utf-8') + '\n')

        except:
            time.sleep(15)
    print(appname + ' processed')

    file.close()
Code Example #33
File: gmailNotifier.py  Project: tux2603/Configs
        pass

    # Check if new and old lists are the same
    sameList = len(unreadMessages) == len(lastKnownUnread)
    if sameList:
        sameList = checkIfSameList(unreadMessages, lastKnownUnread)

    # If new and old lists are the same, cycle the ones in the file
    if sameList:
        if len(unreadMessages) > 0:
            lastKnownUnread.append(lastKnownUnread.pop(0))

            # Overwrite the file with cycled messages
            file = open('/home/rjslater/.config/i3blocks/scripts/.gmailMessages', 'w')
            for message in lastKnownUnread:
                file.write(message[0] + '*.*.*.*' + message[1] + '\n')
            file.close()

            # Display top old message from cycled list
            outputString = '\uf0e0 {} {}: {}'.format(len(lastKnownUnread), lastKnownUnread[0][1], lastKnownUnread[0][0])
            print(outputString[:60])

    # Else overwrite the file with the updated list
    else:
        file = open('/home/rjslater/.config/i3blocks/scripts/.gmailMessages', 'w')
        for message in unreadMessages:
            file.write(message[0] + '*.*.*.*' + message[1] + '\n')
        file.close()

        # Display top new message
        if len(unreadMessages) > 0: