def generate_jwt(service_account_file):
    """Build and sign a JSON Web Token from a Google service-account key file.

    'aud' must match 'audience' and 'iss' must match 'issuer' in the
    security configuration of your swagger spec; both can be any string.
    """
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        service_account_file)

    issued_at = int(time.time())

    claims = {
        'iat': issued_at,
        'exp': issued_at + credentials.MAX_TOKEN_LIFETIME_SECS,
        'aud': 'echo.endpoints.sample.google.com',
        'iss': 'jwt-client.endpoints.sample.google.com',
        # sub and email are mapped to the user id and email respectively.
        'sub': '12345678',
        'email': '*****@*****.**',
    }

    # NOTE(review): relies on oauth2client private attributes (_signer,
    # _private_key_id); there is no public signing accessor on this class.
    return oauth2client.crypt.make_signed_jwt(
        credentials._signer, claims, key_id=credentials._private_key_id)
Ejemplo n.º 2
0
    def from_service_account_json(cls, json_credentials_path, *args, **kwargs):
        """Factory to retrieve JSON credentials while creating client.

        :type json_credentials_path: string
        :param json_credentials_path: Path to the private key file handed out
                                      when the service account was created; a
                                      JSON object holding the private key and
                                      related credential information
                                      (downloaded from the Google APIs
                                      console).

        :type args: tuple
        :param args: Remaining positional arguments to pass to constructor.

        :type kwargs: dict
        :param kwargs: Remaining keyword arguments to pass to constructor.

        :rtype: :class:`gcloud.pubsub.client.Client`
        :returns: The client created with the retrieved JSON credentials.
        :raises: :class:`TypeError` if there is a conflict with the kwargs
                 and the credentials created by the factory.
        """
        # A caller-supplied 'credentials' kwarg would silently be overwritten;
        # reject it explicitly instead.
        if "credentials" in kwargs:
            raise TypeError("credentials must not be in keyword arguments")
        kwargs["credentials"] = ServiceAccountCredentials.from_json_keyfile_name(
            json_credentials_path)
        return cls(*args, **kwargs)
Ejemplo n.º 3
0
    def __init__(self, config):
        """Initialize app state from a Firebase-style config mapping.

        Requires apiKey/authDomain/databaseURL/storageBucket entries; an
        optional serviceAccount entry enables service-account credentials.
        """
        self.api_key = config["apiKey"]
        self.auth_domain = config["authDomain"]
        self.database_url = config["databaseURL"]
        self.storage_bucket = config["storageBucket"]
        self.credentials = None
        self.requests = requests.Session()

        service_account = config.get("serviceAccount")
        if service_account:
            self.service_account = service_account
            scopes = [
                'https://www.googleapis.com/auth/firebase.database',
                'https://www.googleapis.com/auth/userinfo.email',
                "https://www.googleapis.com/auth/cloud-platform"
            ]
            self.credentials = ServiceAccountCredentials.from_json_keyfile_name(
                service_account, scopes)

        # Work around a connection error in the standard GAE environment,
        # related to https://github.com/kennethreitz/requests/issues/3187
        # ProtocolError('Connection aborted.', error(13, 'Permission denied'))
        if is_appengine_sandbox():
            adapter = appengine.AppEngineAdapter(max_retries=3)
        else:
            adapter = requests.adapters.HTTPAdapter(max_retries=3)

        for scheme in ('http://', 'https://'):
            self.requests.mount(scheme, adapter)
Ejemplo n.º 4
0
def main():
	"""Manual gspread smoke test against a hard-coded spreadsheet.

	Reads and writes a few cells on the first and third worksheets and
	prints progress (Python 2 print statements). Key_File is presumably a
	module-level path to the service-account JSON key — confirm at file top.
	"""
	scope = ['https://spreadsheets.google.com/feeds']

	credentials = ServiceAccountCredentials.from_json_keyfile_name(Key_File, scope)
	gc = gspread.authorize(credentials)
	print 'getting values', gc

	print 'running the open_by_key'
	sh = gc.open_by_key('1dzeUJHLZIcIKl3mlMfY5-uFluTwPcVGNHW3OqcSViOY')
	print 'shit went through'
	worksheet = sh.get_worksheet(0)
	
	val = worksheet.acell('B1').value #show value in B1
	print "value before change when there's single worksheet was :", val

	worksheet.update_acell('B1', '42') #change value in B1 to 42 from asdasd
	val2 = worksheet.acell('B1').value
	# Stamp the current wall-clock time into G1 so runs are distinguishable.
	t1 = (time.strftime("%m-%d-%Y %H:%M:%S"))
	worksheet.update_acell('G1', str(t1))
	print 'value changing right now, and it is changing in B1 from ', val, 'to', val2
	print 'current time is :', t1


#	newwks = sh.add_worksheet(title="new added for testing", rows="20", cols="20") #add a new worksheet
#	print 'new worksheet added as title', newwks
	# The third worksheet (index 2) must already exist in the workbook.
	newwks = sh.get_worksheet(2)
	newwks.update_acell('A1', '33') #update cell A1 to 33 from empty
	val3 = newwks.acell('A1').value
	print 'cell A1 is updated to :', val3

	# Bulk update: overwrite every cell of A1:B2 with the timestamp, then
	# push the whole range back in one call.
	cell_list = worksheet.range('A1:B2')
	for cell in cell_list:
		cell.value = str(t1)
	worksheet.update_cells(cell_list)
	print 'worksheet, Sheet1, was changed on A1:B2 from original values to 123'
Ejemplo n.º 5
0
    def __init__(self, creds_file):
        """Authorize gspread and build a Cloud Storage v1 client from the
        given service-account key file (scopes from module-level SCOPES)."""
        creds = ServiceAccountCredentials.from_json_keyfile_name(creds_file, SCOPES)
        self.gc = gspread.authorize(creds)
        self.storage_service = discovery.build(
            'storage', 'v1', credentials=creds)
Ejemplo n.º 6
0
    def __init__(self, account_json):
        """Build a Cloud DNS v1 client and record the key file's project id."""
        scopes = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
        creds = ServiceAccountCredentials.from_json_keyfile_name(account_json, scopes)
        self.dns = discovery.build(
            'dns', 'v1', credentials=creds, cache_discovery=False)
        # The project id lives inside the same key file; read it directly.
        with open(account_json) as key_fh:
            self.project_id = json.load(key_fh)['project_id']
Ejemplo n.º 7
0
def get_worksheet():
    """Return the first worksheet of the sheet configured in the Flask app.

    Key file path and sheet key come from app.config.
    """
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        app.config['JSON_KEYFILE_NAME'], scope)
    client = gspread.authorize(creds)
    workbook = client.open_by_key(app.config['GOOGLE_SHEET_KEY'])
    return workbook.get_worksheet(0)
Ejemplo n.º 8
0
def write_sheet(keyword, rows):
    """Append `rows` below the existing data of the worksheet named `keyword`.

    Reads column A to count existing rows, then batch-updates the A..E range
    immediately below them. Returns the batchUpdate API response dict.
    Uses module-level G_SERCRET / G_SCOPES / G_SHEET_ID.
    """
    credentials = ServiceAccountCredentials.from_json_keyfile_name(G_SERCRET, G_SCOPES)
    service = discovery.build('sheets', 'v4', credentials=credentials)
    spreadsheet_id = G_SHEET_ID

    existing = service.spreadsheets().values().get(
        spreadsheetId=spreadsheet_id, range=keyword + '!A1:A').execute()
    values = existing.get('values', [])
    row_count = len(values)

    if not values:
        print('sheet has no data')

    body = {
        "valueInputOption": "USER_ENTERED",
        "data": [
            {
                # Target range starts right after the last populated row.
                "range": keyword + "!A" + str(row_count + 1) + ":E" + str(row_count + len(rows)),
                "majorDimension": "ROWS",
                "values": rows,
            }
        ],
    }

    response = service.spreadsheets().values().batchUpdate(
        spreadsheetId=spreadsheet_id, body=body).execute()

    # TODO: Change code below to process the `response` dict:
    pprint(response)
    return response
def get_sheet_data_from_url(sheet_url,
                            creds_path='/home/matz/DCSFetchMarks-3cf40810a20f.json'):
    """Return the first worksheet of the Google Sheet at *sheet_url* as CSV.

    :param sheet_url: full URL of the spreadsheet (it must be shared with
        the service account in *creds_path*).
    :param creds_path: path to the service-account JSON key file; defaults
        to the previously hard-coded location for backward compatibility.

    Exits the process with status 2 (after printing a traceback) if the
    sheet cannot be opened or exported.
    """
    import gspread
    from oauth2client.service_account import ServiceAccountCredentials

    # see https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html

    # use creds to create a client to interact with the Google Drive API
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name(creds_path, scope)
    client = gspread.authorize(creds)

    # Find the workbook by URL and open the first sheet
    try:
        work_book = client.open_by_url(sheet_url)
        assert work_book
        sheet = work_book.sheet1
        assert sheet
        csv_file_data = sheet.export(format='csv')
        assert csv_file_data
        return csv_file_data
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; catch Exception and use sys.exit, not exit().
    except Exception:
        import traceback, sys
        print("failed to open sheet", sheet_url)
        traceback.print_exc(file=sys.stdout)
        sys.exit(2)
Ejemplo n.º 10
0
def lambda_handler(event, context):
    """AWS Lambda entry point: dump A->B key/value pairs from a sheet to JSON.

    Reads column A (key) and column B (value) of the configured worksheet,
    builds a dict skipping rows whose A cell is blank, then prints the JSON
    (debug mode) or uploads it to S3.
    """
    global config

    # Load the configuration (s3_bucket, s3_key, sheet_id)
    with open('config.json') as data_file:
        config = json.load(data_file)

    # Connect to Google Sheets and open the sheet
    # Ensure the sheet is shared with the service
    # account email address ([email protected])
    scopes = ['https://spreadsheets.google.com/feeds']
    credentials = ServiceAccountCredentials.from_json_keyfile_name('credentials.json', scopes=scopes)
    gc = gspread.authorize(credentials)
    sheet = gc.open_by_key(config['sheet_id']).worksheet(config['worksheet_name'])

    # A1:B<rows> comes back flattened row-major: cells alternate A, B, A, B...
    gval = sheet.range("A1:B" + str(sheet.row_count))
    data = {}

    # Walk the (A, B) cell pairs. BUG FIX: the original iterated
    # range(sheet.row_count) over a list of 2*row_count cells, so only the
    # first half of the rows was ever processed.
    for i in range(0, len(gval) - 1, 2):
        if gval[i].value != '':
            data[gval[i].value] = str(gval[i + 1].value)

    # Encode into JSON
    jsonstr = json.dumps(data)

    # Print or upload to S3 (debug is a module-level flag)
    if debug:
        print(jsonstr)  # was a Python 2 print statement
        return

    return upload_to_s3(jsonstr)
Ejemplo n.º 11
0
 def __init__(self, credential_path, spreadsheet_name):
     """Authorize gspread with the given key file and open the named
     spreadsheet."""
     creds = ServiceAccountCredentials.from_json_keyfile_name(
         credential_path, ['https://spreadsheets.google.com/feeds'])
     self.gc = gspread.authorize(creds)
     logging.info('Sheet service client authorized, credential path: %s' % credential_path)
     self.spreadsheet = self.gc.open(spreadsheet_name)
Ejemplo n.º 12
0
 def __init__(self, sheet_id):
     """Open worksheet 0 of *sheet_id* using the service_key.json file that
     sits next to this module."""
     key_path = os.path.join(os.path.dirname(__file__), 'service_key.json')
     creds = ServiceAccountCredentials.from_json_keyfile_name(
         key_path, ['https://spreadsheets.google.com/feeds'])
     self.gc = gspread.authorize(creds)
     self.worksheet = self.gc.open_by_key(sheet_id).get_worksheet(0)
def go(startdate, enddate):
    """Print (as JSON) selected daily-goal rows between two dates.

    Dates are 'YYYY-MM-DD' strings, inclusive. Each sheet row's date is
    shifted back one day before filtering, and Sundays (weekday 6) are
    excluded. Rows whose first cell is not a date are skipped.
    """
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        'daily_goals.json', ['https://spreadsheets.google.com/feeds'])
    gc = gspread.authorize(credentials)
    wks = gc.open("Daily Goals").worksheet("Sheet1")
    rows = wks.get_all_values()

    start = datetime.datetime.strptime(startdate, '%Y-%m-%d')
    end = datetime.datetime.strptime(enddate, '%Y-%m-%d')

    def str2num(val):
        """Blank cells count as 0; anything else parses as float."""
        return float(val) if val else 0

    results = {'rows': []}

    for row in rows:
        (date_str, wateroz, vitamins, scripture_study, exercised, pullups,
         divebombpushups, calories, sevenminuteworkout, weight, sat_fat_grams,
         sol_fiber_grams, hours_slept, servings_fruit_veg) = row
        try:
            dateobj = datetime.datetime.strptime(date_str.split(' ')[0], '%Y-%m-%d')
        except ValueError:
            # Header rows / non-date cells.
            continue
        dateobj -= datetime.timedelta(days=1)
        if start <= dateobj <= end and dateobj.weekday() != 6:
            results['rows'].append({
                'date_str': dateobj.date().strftime('%Y-%m-%d'),
                'physical_activity_description': 'walking',
                'activity_minutes': exercised,
                'water_5_or_more_cups': (str2num(wateroz) / 8) >= 5,
                'fruit_veg_4_or_more_servings': str2num(servings_fruit_veg) >= 4,
                'sleep_7_or_more_hours': str2num(hours_slept) >= 7})

    print(json.dumps(results))
Ejemplo n.º 14
0
def get_gspread(SS_ADDRESS, sheetname):
    """Open the spreadsheet at URL *SS_ADDRESS* and return worksheet
    *sheetname*, authorizing with the module-level KEY file."""
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        KEY, ['https://spreadsheets.google.com/feeds'])
    client = authorize(creds)
    return client.open_by_url(SS_ADDRESS).worksheet(sheetname)
Ejemplo n.º 15
0
def get_google_service():
    """Build a Sheets v4 service authorized via the module-level KEY file."""
    scope = 'https://www.googleapis.com/auth/spreadsheets'
    creds = ServiceAccountCredentials.from_json_keyfile_name(KEY, scope)
    authorized_http = creds.authorize(httplib2.Http())
    discovery_url = ('https://sheets.googleapis.com/$discovery/rest?version=v4')
    return discovery.build('sheets', 'v4', http=authorized_http,
                           discoveryServiceUrl=discovery_url)
Ejemplo n.º 16
0
    def auth_update(self, json_file, filename):
        """
        Authenticate the credentials and update the text file of bad words
        :param json_file: Service account key of Google API
        :param filename: Name of the Google Sheet
        :return: A text file of bad word, updated from Google Sheets
        """
        scope = ['https://spreadsheets.google.com/feeds',
                 'https://www.googleapis.com/auth/drive']

        credentials = ServiceAccountCredentials.from_json_keyfile_name(json_file, scope)

        gc = gspread.authorize(credentials)

        wks = gc.open(filename).sheet1

        values_list = wks.col_values(1)

        # BUG FIX: the original opened the file without ever closing it
        # (and bound it to `file`, shadowing the builtin); use a context
        # manager so the handle is flushed and closed deterministically.
        with open('Cleanify/data/badwords.txt', 'w') as out:
            for value in values_list:
                # Skip empty cells.
                if not value:
                    continue
                out.write(value.strip() + '\n')
 def 提著資料表(self):
     """Authorize with the instance's key file and scope, then return
     sheet1 of the spreadsheet at self.url."""
     creds = ServiceAccountCredentials.from_json_keyfile_name(
         self.key_file_name, self.google_sheet_scope
     )
     workbook = gspread.authorize(creds).open_by_url(self.url)
     return workbook.sheet1
Ejemplo n.º 18
0
def login_open_sheet(oauth_key_file, spreadsheet):
    """Connect to Google Docs spreadsheet and return the first worksheet."""
    scope = ['https://spreadsheets.google.com/feeds',
             'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)
    return gspread.authorize(creds).open(spreadsheet).sheet1
Ejemplo n.º 19
0
def _buildAdminService():
    """Create an HTTP session specifically for GOOGLE ADMIN SDK functions using ADMIN_USER"""
    base_creds = ServiceAccountCredentials.from_json_keyfile_name(KEYFILE, SCOPES)
    delegated = base_creds.create_delegated(ADMIN_USER)
    return discovery.build('admin', 'directory_v1',
                           http=delegated.authorize(Http()))
Ejemplo n.º 20
0
def get_all_elgible_email_address():
    """Populate the module-level name/email lists from the signup sheet.

    For every row whose first column is non-empty, appends to
    all_first_names, all_full_names and all_email_addresses. Column layout:
    0 = first name, 1 = nickname (optional), 2 = last name, 3 = email.
    """
    global all_email_addresses
    global all_first_names
    global all_full_names

    credentials = ServiceAccountCredentials.from_json_keyfile_name(SECRETS, scopes=SCOPES)
    gc = gspread.authorize(credentials)
    sh = gc.open_by_key('1Kodv_Fzz9Oki6q9w14jGddP49XFWD8VnXfFlxyViMVY')
    email_worksheet = sh.get_worksheet(0)
    first_col = grab_col_safe(email_worksheet, 1)
    all_data = grab_all_data_safe(email_worksheet)
    # NOTE: the original also created an `emails = []` local that was never
    # used; it has been removed.

    # Skip over the header entry at index 0.
    for i in range(1, len(first_col)):
        if first_col[i] == '':
            continue
        row = all_data[i]
        full_name = row[0]
        if row[1] != '':
            # Nickname present: display it quoted in the full name.
            first_name = row[1]
            full_name += ' "' + row[1] + '"'
        else:
            first_name = row[0]
        full_name += ' ' + row[2]

        all_first_names.append(first_name)
        all_full_names.append(full_name)
        # The 4th column has the email data.
        all_email_addresses.append(row[3])
Ejemplo n.º 21
0
def _buildEmailService():
    """Create an HTTP session specifically for the GMAIL API and sending emails from the EMAIL_USER account"""
    sa_creds = ServiceAccountCredentials.from_json_keyfile_name(KEYFILE, SCOPES)
    http_auth = sa_creds.create_delegated(EMAIL_USER).authorize(Http())
    return discovery.build('gmail', 'v1', http=http_auth)
Ejemplo n.º 22
0
    def plotEnemies(self, inDetail=False):
        """Stamp an ALLY/ENEMY label into the grid sheet for each enemy row.

        Reads the 'Enemies' worksheet of the "FF2 Hive" workbook and, for
        every record, writes a label into the cell at (Y, X) of sheet1.
        When inDetail is true the label also carries level/tag/name/coords.
        """
        scope = ['https://spreadsheets.google.com/feeds']
        credentials = ServiceAccountCredentials.from_json_keyfile_name('HiveGrid-f91dbd05eabd.json', scope)
        gc = gspread.authorize(credentials)
        # Open the workbook once and pull both worksheets from it
        # (the original called gc.open("FF2 Hive") twice).
        book = gc.open("FF2 Hive")
        sheet = book.sheet1
        enemyData = book.worksheet('Enemies').get_all_records(False, 1)

        for eDict in enemyData:
            eLv = eDict['Castle Lv']
            eTag = eDict['Guild Tag']
            eName = eDict['Name']
            eX = eDict['X Cord']
            eY = eDict['Y Cord']
            # FF1 / FF4 guilds are friendly; everything else is hostile.
            if eTag == 'FF1' or eTag == 'FF4':
                myEnum = 'ALLY'
            else:
                myEnum = 'ENEMY'
            # BUG FIX: the original bound the label to the name `str`,
            # shadowing the builtin inside this method.
            if inDetail == True:
                if eTag == "":
                    label = "{} - {} {} ({}:{})".format(myEnum, eLv, eName, eX, eY)
                else:
                    label = "{} - {} [{}]{} ({}:{})".format(myEnum, eLv, eTag, eName, eX, eY)
            else:
                label = "{} Lv{}".format(myEnum, eLv)

            print(label)
            sheet.update_cell(eY, eX, label)
Ejemplo n.º 23
0
def renewed_worksheet():
    """(Re-)authenticate and return the linked worksheet, retrying on failure.

    Retries up to max_num_attempts times, sleeping between attempts, and
    exits the process if WORKSHEET_TITLE is unset or every attempt fails.
    Uses module-level SECRETS / SCOPES / LINKED_SPREADSHEET_KEY globals.
    """
    if WORKSHEET_TITLE == '':
        print_write("FATAL: Worksheet title does not exit")
        sys.exit(-1)
    times_attempted = 0
    max_num_attempts = 86400
    time_of_initial_attempt = time.ctime()
    while times_attempted < max_num_attempts:
        try:
            times_attempted = times_attempted + 1
            credentials = ServiceAccountCredentials.from_json_keyfile_name(SECRETS, scopes=SCOPES)
            gc = gspread.authorize(credentials)
            sh = gc.open_by_key(LINKED_SPREADSHEET_KEY)
            authenticated_worksheet = sh.worksheet(WORKSHEET_TITLE)
            time.sleep(2)  # Ensure we don't call Google APIs too rapidly
            break
        except Exception as e:  # Maybe we are using APIs too much; retry after waiting
            # BUG FIX: converted the Python 2 print statements in this
            # function to portable print(...) calls.
            print(e)
            verify_internet_access()
            time.sleep(10)
    if times_attempted == max_num_attempts:
        print("FATAL: Unable to recover")
        sys.exit(-1)
    else:
        if times_attempted > 1:
            print_write("[" + time_of_initial_attempt + ", " + time.ctime() + "]" + " Worksheet successfully renewed after " + str(times_attempted) + " tries")
        return authenticated_worksheet
Ejemplo n.º 24
0
def get_service_acct_creds(key_file, verbose=False):
  '''Generate service account credentials using the given key file.

    key_file: path to file containing private key. JSON keys are handled
              directly; legacy .p12/.pem keys use the service-account name
              from edx2bigquery_config, falling back to the old
              SignedJwtAssertionCredentials call if needed.
    verbose: print which key file / service account is being used.
  '''
  ### backcompatability for .p12 keyfiles
  if key_file.endswith('.p12') or key_file.endswith('.pem'):
    from edx2bigquery_config import auth_service_acct as SERVICE_ACCT
    if verbose:
      # BUG FIX: the original printed the undefined global KEY_FILE
      # (NameError); the parameter is key_file. Also converted the
      # Python 2 print statements.
      print("using key file")
      print("service_acct=%s, key_file=%s" % (SERVICE_ACCT, key_file))
    try:
      creds = ServiceAccountCredentials.from_p12_keyfile(
        SERVICE_ACCT,
        key_file,
        scopes=BIGQUERY_SCOPE)
    except Exception:			# fallback to old google SignedJwtAssertionCredentials call
      with open(key_file, 'rb') as f:
        key = f.read()
      creds = SignedJwtAssertionCredentials(
        SERVICE_ACCT,
        key,
        BIGQUERY_SCOPE)
    return creds
  ###
  creds = ServiceAccountCredentials.from_json_keyfile_name(
    key_file,
    BIGQUERY_SCOPE)
  return creds
def get_authenticated_service_s2s():
    """
    Build an authenticated YouTube service using OAuth2 server-to-server
    (service account) credentials.
    :return: the YouTube API service object
    """
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        GOOGLE_KEY_FILE,
        SCOPES)

    # BUG FIX: default to no proxy so proxy_http is always bound; the
    # original only assigned it inside the 'production'/'local' branches,
    # raising NameError for any other SETTING_FILE value.
    proxy_http = None

    #SETTING_FILE = 'production'
    # When running on the VPS ('production'), no proxy is used (see default
    # above). When running locally, route traffic through a local HTTP proxy.
    if SETTING_FILE == 'local':
        myproxy = httplib2.ProxyInfo(
            proxy_type=httplib2.socks.PROXY_TYPE_HTTP,
            proxy_host='127.0.0.1', proxy_port=8118)
        proxy_http = httplib2.Http(proxy_info=myproxy)

    youtube_service = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
                            credentials=credentials, http=proxy_http)

    return youtube_service
Ejemplo n.º 26
0
def get_credentials():
    """Gets valid user credentials from storage.

    Loads service-account credentials from a JSON key file under a fixed
    Google Drive-synced .credentials directory; if those are missing or
    invalid, falls back to an interactive OAuth2 installed-app flow.

    Returns:
        Credentials, the obtained credential.
    """

    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir,'Google Drive/G/IT/Development/ETL/.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir,'client_secret_anchorpath_service.json')
    # NOTE(review): `store` is only consulted by the fallback flow below;
    # it points at the same path as the service-account key file — confirm
    # that is intended.
    store = oauth2client.file.Storage(credential_path)
    #scopes = ['https://www.googleapis.com/auth/sqlservice.admin']
    SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'
    credentials = ServiceAccountCredentials.from_json_keyfile_name(credential_path, scopes=SCOPES)
    if not credentials or credentials.invalid:
        # NOTE(review): CLIENT_SECRET_FILE, APPLICATION_NAME and flags are
        # presumably module-level globals — not visible in this chunk;
        # verify they are defined before relying on this fallback path.
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else: # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
Ejemplo n.º 27
0
def requirements(request):
    """Django view: show a member's UPE requirement status from the
    tracking spreadsheet, applying name/committee edits on POST."""
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = ServiceAccountCredentials.from_json_keyfile_name('UPE General Requirements-fe3bf88c2518.json', scope)
    gc = gspread.authorize(credentials)
    wks = gc.open("UPE General Requirements").sheet1
    user = request.user
    up = UserProfile.objects.get(user=user)

    # Locate this user's row by their full name.
    user_cell = wks.find(user.first_name + " " + user.last_name)

    # Requirement headers: row-1 cells up to the first blank column.
    req_list = []
    for col in range(1, wks.col_count + 1):
        header = wks.cell(1, col)
        if header.value == "":
            break
        req_list.append(header)

    # Pair each requirement header with this user's cell in that column.
    req_dict = [(req, wks.cell(user_cell.row, req.col)) for req in req_list]

    if request.method == 'POST':
        if request.POST['name'] == 'committee':
            user.committee = request.POST['value']
            user.save()
        elif request.POST['name'] == 'name':
            name = request.POST['value']
            pattern = re.compile('([a-zA-Z]+)\\s+([a-zA-Z]+)')
            m = pattern.match(name)
            user.first_name = m.group(1)
            user.last_name = m.group(2)
            user.save()

    return render_to_response('users/requirements.html', 
            context_instance=RequestContext(request,{'req_dict': req_dict, 'up':up}))
Ejemplo n.º 28
0
	def __get_google_contacts(self):
		"""Fetch birthday contacts from the Google contacts spreadsheet.

		Reads the name (col 1), email (col 3) and birthday (col 6) columns,
		resolves Slack ids, caches the result to self.CONTACT_FILE and
		returns the contact list.
		"""

		# Authorize against Google with the service-account key
		# (scope is presumably a module-level global — not visible here).
		credentials = ServiceAccountCredentials.from_json_keyfile_name(self.GOOGLE_API_KEY, scope)
		self.gc = gspread.authorize(credentials)

		wks = self.gc.open_by_url(self.GOOGLE_CONTACT_FILE).sheet1

		contacts = []
		names = wks.col_values(1)[1:]
		emails = wks.col_values(3)[1:]
		birsdays = wks.col_values(6)[1:]

		# BUG FIX: the original looked fields up with names.index(name),
		# which is O(n^2) and always returns the FIRST occurrence — so
		# duplicate names got the wrong email/birthday. Iterate by position.
		for i, name in enumerate(names):
			if name and birsdays[i]:
				contacts.append({
					"name": name.strip(),
					"birsday": date_parse(birsdays[i].strip().lower())
									.strftime('%Y/%m/%d'),
					"email": emails[i]
								.strip().replace('\n', '').replace('\t', '').replace(' ', '').lower(),
					"slack_id": ""
				})

		contacts = self.__associate_slack_id_to_contacts(contacts)

		# Cache the result to a file.
		with open(self.CONTACT_FILE, 'w+') as f:
			f.write(json.dumps(contacts))

		return contacts
Ejemplo n.º 29
0
def importDataDef():
    """Reload the Data_Def datastore kind from the socialmapkorea sheet.

    Reads the code (col 1) and name (col 2) columns of sheet1, deletes all
    existing Data_Def entities, then writes one entity per (code, name)
    pair inside a single transaction. Uses the module-level datastore
    `client` and `deleteAll` helper.
    """
    import gspread
    from oauth2client.service_account import ServiceAccountCredentials

    scope = ['https://spreadsheets.google.com/feeds']

    credentials = ServiceAccountCredentials.from_json_keyfile_name('socialmapkorea-credentials.json', scope)

    gc = gspread.authorize(credentials)

    wks = gc.open("socialmapkorea_data").sheet1

    # Keep only non-empty cells of each column.
    code_list = [v for v in wks.col_values(1) if len(v) > 0]
    name_list = [v for v in wks.col_values(2) if len(v) > 0]
    # NOTE: the original ran name_list through an identity map() (a no-op,
    # removed) and paired the columns with map(lambda x, y: [x, y], ...);
    # zip truncates to the shorter column exactly the same way.
    data_def_list = [[code, name] for code, name in zip(code_list, name_list)]

    deleteAll('Data_Def')

    with client.transaction():
        incomplete_key = client.key('Data_Def')
        for item in data_def_list:
            print(str(item))
            datadefEntity = datastore.Entity(key=incomplete_key)
            datadefEntity.update({
                                  'code': item[0],
                                  'name': item[1]})
            client.put(datadefEntity)
            print(datadefEntity.key) 
Ejemplo n.º 30
0
def main():
    """Record this host's public IP into the 'Registro IPs' spreadsheet,
    or list all recorded IPs when invoked with -l."""
    hostname = socket.gethostname()
    # Directory containing this script; the key file lives under the
    # owning user's ~/.ssh/otros.
    script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    user = script_dir[:script_dir.find('/', len('/home/'))]
    client_secret = user + '/.ssh/otros/errBot Youtube-7ff8701bdfdd.json'
    sheet_name = 'Registro IPs'

    # use creds to create a client to interact with the Google Drive API
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name(client_secret, scope)
    client = gspread.authorize(creds)

    sheet = client.open(sheet_name)

    if len(sys.argv) > 1 and (sys.argv[1] == "-l"):
        listIps(sheet)
        return

    # One worksheet per host; create it on first use.
    try:
        worksheet = sheet.worksheet(hostname)
    except gspread.exceptions.WorksheetNotFound:
        worksheet = sheet.add_worksheet(hostname, 5, 5)

    ip = getIp()
    print(hostname, user, ip, time.time())
    worksheet.insert_row([time.time(), ip, user], 2)
Ejemplo n.º 31
0
def job_function():
    """Scheduled job: at 07:00, email each subscriber a crypto digest.

    Reads the "bitcoin railroad registration" Google Sheet for each
    subscriber's email, five coin symbols and five subreddit URLs; fetches
    prices via minute_price_historical/daily_price_historical, collects
    reddit news via collectnews, renders the HTML template from
    retrieve_html() and sends it with send_email().

    Fix: the original ``except Exception, e:`` is Python-2-only syntax and
    a SyntaxError under Python 3 (which this code otherwise targets with
    ``print()`` calls); replaced with ``except Exception as e:``. A large
    commented-out duplicate of the processing loop was removed as dead code.
    """
    print("Hello World")
    print(datetime.datetime.now())

    # use creds to create a client to interact with the Google Drive API
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        'bitcoin railroad-8cc9cd351748.json', scope)
    client = gspread.authorize(creds)

    # Find a workbook by name and open the first sheet
    # Make sure you use the right name here.
    sheet = client.open("bitcoin railroad registration").sheet1

    # Drop the header row; keep the columns holding email, coins and URLs.
    list_of_lists = sheet.get_all_values()
    df = pd.DataFrame(list_of_lists[1:])
    df = df[[2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]]
    df.columns = [
        'email', 'coin1', 'coin2', 'coin3', 'coin4', 'coin5', 'url1', 'url2',
        'url3', 'url4', 'url5'
    ]
    df = df[df.email != ""]

    ## FILL THIS IN: reddit API credentials.
    client_id = ""
    client_secret = ""
    username = ""
    password = ""
    user_agent = ""

    reddit = praw.Reddit(client_id=client_id,
                         client_secret=client_secret,
                         password=password,
                         username=username,
                         user_agent=user_agent)

    # Search terms used when collecting reddit news.
    keywords = [
        "release", "listing", "partnership", "campaign", "conference", "rumor",
        "china", "korea", "japan", "government"
    ]

    now_hour = datetime.datetime.now().hour
    now_minute = datetime.datetime.now().minute

    # Only build and send the digest at exactly 07:00.
    if now_hour == 7 and now_minute == 0:

        for idx, row in df.iterrows():

            try:

                # Retrieve this subscriber's five coin symbols.
                tokens = []
                for i in (0, 1, 2, 3, 4):
                    tokens.append(df.loc[idx][i + 1])

                # Build one HTML price line per coin: latest close plus
                # 30-day high/low, denominated in satoshis.
                price_texts = []

                for token in tokens:
                    print(tokens)
                    print(token)
                    price_time = minute_price_historical(
                        token, 'BTC', 1, 1).iloc[-1]['timestamp']
                    close = minute_price_historical(token, 'BTC', 1,
                                                    1).iloc[-1]['close']
                    time_text = price_time.strftime("%H:%M") + " EST"
                    high30 = daily_price_historical(token, 'BTC', 1,
                                                    1)[-31:-1]['high'].max()
                    low30 = daily_price_historical(token, 'BTC', 1,
                                                   1)[-31:-1]['low'].min()

                    price_text = "<h2>$" + str(token) + ": " + str(
                        close) + " sats (" + str(
                            time_text
                        ) + ") </h2> <h3>30-day high (low): " + str(
                            high30) + " (" + str(low30) + ") sats</h3><br>"

                    print(price_text)

                    price_texts.append(price_text)

                prices = ''.join(price_texts)
                print(prices)

                # Subreddit identifiers from the sheet's URL columns.
                # The [25:] slice presumably strips a fixed URL prefix
                # such as 'https://www.reddit.com/r/' — TODO confirm.
                reddit_urls = []
                reddit_urls.append(df.loc[idx][6][25:])
                reddit_urls.append(df.loc[idx][7][25:])
                reddit_urls.append(df.loc[idx][8][25:])
                reddit_urls.append(df.loc[idx][9][25:])
                reddit_urls.append(df.loc[idx][10][25:])

                news_all = []

                # Gather news for every keyword across the subscriber's
                # subreddits.
                for keyword in keywords:
                    print("Searching for  " + keyword + "...")
                    news = collectnews(reddit,
                                       reddit_urls,
                                       keyword=keyword,
                                       period="day")
                    print(news)
                    news_all = news_all + news

                    print("All News: ", news_all)

                # Fall back to r/bitcoin and r/ethereum when little or no
                # news was found.
                no_news_toggle = False
                if news_all == [] or len(news_all) < 5:
                    no_news_toggle = True
                    print(
                        "No news found. Searching in bitcoin and ethereum...")
                    reddit_urls = []
                    reddit_urls.append("bitcoin")
                    reddit_urls.append("ethereum")

                    for keyword in keywords:
                        print("Searching for  " + keyword + "...")
                        news = collectnews(reddit,
                                           reddit_urls,
                                           keyword=keyword,
                                           period="hour")
                        print(news)
                        news_all = news_all + news

                news_all = sorted(news_all)

                # Blank out repeated source names so each source heads a
                # single group in the rendered digest.
                search = news_all[0][0]
                for news in news_all[1:]:
                    if news[0] == search:
                        news[0] = ""
                    else:
                        search = news[0]

                # Render each news item as an HTML line with its domain.
                news_all_lines = []
                for news in news_all:
                    extracted = tldextract.extract(news[3])
                    domain = "{}.{}".format(extracted.domain, extracted.suffix)
                    news_all_lines.append("""<br><h2><div align="center">""" +
                                          str(news[0]).upper() +
                                          "</div></h2> [" +
                                          domain.capitalize() +
                                          """] <a href=" """ + str(news[3]) +
                                          """ ">""" + str(news[1]) + "</a>")

                # Joined after the loop: same final value as the original's
                # per-iteration join, computed once.
                news_all_final = ' '.join(news_all_lines)

                if no_news_toggle:
                    no_news = "<br><h3> Check back tomorrow for more news on your favorite coins. In the meantime, check out these news in bitcoin/ethereum below! </h3><br>"
                else:
                    no_news = ""

                # FILL THIS IN: sender account and recipient.
                user = ""  #Email to send from
                pwd = ""  #Password of email to send from
                recipient = ""  # Receiving email

                body = retrieve_html()
                msg = Template(body).safe_substitute(comment=news_all_final,
                                                     price=prices,
                                                     no_news=no_news)

                # Days elapsed since the first "train" number the email.
                d0 = datetime.date(2017, 10, 25)
                d1 = datetime.date.today()
                delta = d1 - d0

                subject = "Bitcoin Railroad: Train #" + str(delta.days)

                if msg:
                    send_email(user, pwd, recipient, subject, msg)

                # NOTE(review): 'part' is never used afterwards — confirm
                # whether a MIME attachment step was intended here.
                part = MIMEText(body, 'html')

                print("end")

            except Exception as e:
                # Log the failure and continue with the next subscriber.
                print(e)
Ejemplo n.º 32
0
import json
import logging
import time
import urllib
from typing import List

from cidc_utils.loghandler.stack_driver_handler import send_mail, log_formatted
from bson import ObjectId, json_util
from flask import _request_ctx_stack, abort
from flask import current_app as app
from kombu import Connection, Exchange, Producer
from oauth2client.service_account import ServiceAccountCredentials

from settings import GOOGLE_UPLOAD_BUCKET, GOOGLE_BUCKET_NAME, GOOGLE_URL, RABBIT_MQ_ADDRESS, SENDGRID_API_KEY

# Load service-account credentials once at import time; the service-account
# email doubles as the OAuth client ID used elsewhere in this module.
# NOTE(review): the keyfile path is relative to the working directory —
# confirm the process is started from the expected location.
CREDS = ServiceAccountCredentials.from_json_keyfile_name(
    "../auth/.google_auth.json")
CLIENT_ID = CREDS.service_account_email


def update_last_access(email: str):
    """
    Updates a user's last access time when they touch an endpoint.

    Arguments:
        email {str} -- User's email.
    """
    last_access = app.data.driver.db["last_access"]
    if not last_access.find_one({"email": email}):
        last_access.insert({
            "email":
            email,
Ejemplo n.º 33
0
import datetime
import numpy as np
import shutil
import pipes
from subprocess import check_call
from email.mime.text import MIMEText
from oauth2client.service_account import ServiceAccountCredentials
# Timestamp for today's date stamp written to the tracker sheet.
now = datetime.datetime.now()
todaysDate = now.strftime("%m/%d/%Y")
scope = ['https://spreadsheets.google.com/feeds']
# NOTE(review): json_key is loaded but never used in this section — the same
# keyfile is passed directly to from_json_keyfile_name below; confirm it can
# be removed (the open() handle is also never closed).
json_key = json.load(
    open(
        '/Users/transcoder/Desktop/POD_Workflow_Files/Workflow-aa96f264017c.json'
    ))
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    '/Users/transcoder/Desktop/POD_Workflow_Files/Workflow-aa96f264017c.json',
    scope)
gc = gspread.authorize(credentials)

googleDoc = gc.open('Workflow Test')

worksheet = googleDoc.worksheet("Project Tracker")

# Source and fallback locations for POD video files.
podPath = '/Volumes/videos/pod_videos/'
NoFolder = '/Volumes//videos/pod_videos/FromAudio/noFolder'

# The input file path comes from the first CLI argument.
fileName = os.path.basename(sys.argv[1])
filePath = os.path.dirname(sys.argv[1])
# NOTE: os.path.splitext returns a (root, ext) tuple, not just the extension.
fileExtension = os.path.splitext(sys.argv[1])

PODLocation = "/Volumes/pods/!Future_Videos"
Ejemplo n.º 34
0
def submit():
    """Sync new rows from the "Tech Test" Google Sheet into the database
    and return every stored record as JSON.

    A sheet row is inserted only when no DataTableModel row with the same
    first_name already exists; afterwards all rows in the table are
    serialized and returned.

    Fix: removed the unused local ``k = []``.

    :returns: Flask JSON response — a list with one dict per stored row.
    """
    scope = [
        'https://www.googleapis.com/auth/spreadsheets',
        "https://www.googleapis.com/auth/drive.file",
        "https://www.googleapis.com/auth/drive"
    ]
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        "client_cred.json", scope)
    client = gspread.authorize(creds)

    sheet = client.open("Tech Test").sheet1  # get sheet 1 data

    sheet_data = sheet.get_all_records()
    f_data = []
    for data in sheet_data:

        # Deduplicate on first_name only: skip rows already present.
        # NOTE(review): first_name is a weak uniqueness key — confirm intent.
        if db.session.query(DataTableModel).filter(
                DataTableModel.first_name == data["first_name"]).count() == 0:
            first_name = data["first_name"]
            last_name = data["last_name"]
            email = data["email"]
            job_title = data["job_title"]
            job_title_full = data["job_title_full"]
            city = data["city"]
            company = data["company"]
            country = data["country"]
            linkedin = data["linkedin"]
            company_website = data["company_website"]
            company_industry = data["company_industry"]
            company_founded = data["company_founded"]
            company_size = data["company_size"]
            company_linkedin = data["company_linkedin"]
            # NOTE: 'headquaters' spelling matches the model's attribute.
            company_headquaters = data["company_headquarters"]
            email_reliability_status = data["email_reliability_status"]
            receiving_email_server = data["receiving_email_server"]
            kind = data["kind"]
            tag = data["tag"]
            month = data["month"]

            commit_data = DataTableModel(
                first_name, last_name, email, job_title_full, job_title, city,
                country, linkedin, company, company_website, company_industry,
                company_founded, company_size, company_linkedin,
                company_headquaters, email_reliability_status,
                receiving_email_server, kind, tag, month)

            db.session.add(commit_data)
            db.session.commit()

    post_data = db.session.query(DataTableModel).all()

    # Serialize every stored row for the JSON response.
    for data in post_data:
        f_data.append({
            "id": data.id,
            "first_name": data.first_name,
            "last_name": data.last_name,
            "email": data.email,
            "job_title": data.job_title,
            "job_title_full": data.job_title_full,
            "city": data.city,
            "country": data.country,
            "linkedin": data.linkedin,
            "company": data.company,
            "company_website": data.company_website,
            "company_industry": data.company_industry,
            "company_founded": data.company_founded,
            "company_size": data.company_size,
            "company_linkedin": data.company_linkedin,
            "company_headquaters": data.company_headquaters,
            "email_reliability_status": data.email_reliability_status,
            "receiving email server": data.receiving_email_server,
            "kind": data.kind,
            "tag": data.tag,
            "month": data.month
        })
    return jsonify(f_data)
Ejemplo n.º 35
0
 def credentials(self):
     """Build ServiceAccountCredentials from this object's key file and scope."""
     key_file = self.creds_file
     scopes = [self.scope]
     return ServiceAccountCredentials.from_json_keyfile_name(key_file,
                                                             scopes=scopes)
Ejemplo n.º 36
0
import os
from oauth2client.service_account import ServiceAccountCredentials
import re
import cache_parser as cp
import shutil
from pandas.io import gbq

### OAuth2 credentialing and authentication; give gspread, gbq permissions
scopes = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive',
    "https://www.googleapis.com/auth/bigquery.insertdata",
    "https://www.googleapis.com/auth/bigquery",
    "https://www.googleapis.com/auth/cloud-platform"
]
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    './ka_cred.json', scopes=scopes)
client = gspread.authorize(credentials)

### Load input parameters for API calls
sheet = client.open("data_parameters").sheet1
vals = sheet.get_all_values()
headers = vals.pop(0)

# Find latest cache, outfile
file_re = re.compile(r'logs\/.*')
cells = sheet.findall(file_re)
cell_list = [(cell.row, cell.col) for cell in cells]
latest_log_coords = max(cell_list, key=(lambda item: item[0]))

file_re = re.compile(r'outfiles\/.*')
cells = sheet.findall(file_re)
Ejemplo n.º 37
0
from apscheduler.scheduler import Scheduler
import logging

logging.basicConfig()

import datetime
import requests
# In[ ]:

#Before ap_scheduler

# NOTE(review): 'global' at module level is a no-op — email_list is already
# a module-level name; this statement can be removed.
global email_list

# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name(
    'bitcoin railroad-8cc9cd351748.json', scope)
client = gspread.authorize(creds)

# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
sheet = client.open("bitcoin railroad registration").sheet1

# In[21]:

# Keep only the email column (index 2), dropping the header row and blanks.
list_of_lists = sheet.get_all_values()
df = pd.DataFrame(list_of_lists[1:])
df = df[[2]]
df.columns = ['email']
df = df[df.email != ""]

# Subscribers already processed in this run.
email_list = []
Ejemplo n.º 38
0
    requests.post(url, headers=headers, params=params)


try:
    #共有設定したスプレッドシートキーを変数[SPREADSHEET_KEY]に格納する。
    SPREADSHEET_KEY = '************************************'

    #2つのAPIを記述しないとリフレッシュトークンを3600秒毎に発行し続けなければならない
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]

    #認証情報設定
    #ダウンロードしたjsonファイル名をcredentials変数に設定
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        '*******************************.json', scope)

    #OAuth2の資格情報を使用してGoogle APIにログインします。
    gc = gspread.authorize(credentials)

    #共有設定したスプレッドシートを開く
    workbook = gc.open_by_key(SPREADSHEET_KEY)
    worksheet = workbook.worksheet('記録')
    worksheetinfo = workbook.worksheet('アカウント情報')

    #アカウント情報取得
    #ユーザー名
    username = worksheetinfo.acell('B2').value
    #パスワード
    password = worksheetinfo.acell('C2').value
    #いいね数指定
Ejemplo n.º 39
0
import httplib2
import apiclient.discovery
from oauth2client.service_account import ServiceAccountCredentials

CREDENTIALS_FILE = 'cybersep-310108-c1268b1fb570.json'

# Read the service-account keys from the file
credentials = ServiceAccountCredentials.from_json_keyfile_name(CREDENTIALS_FILE, ['https://www.googleapis.com/auth/spreadsheets', 'https://www.googleapis.com/auth/drive'])

httpAuth = credentials.authorize(httplib2.Http()) # Authorize with the service
service = apiclient.discovery.build('sheets', 'v4', http = httpAuth)


fifile = '1oQKWSfnal13xLCPpfHqH46ROC9w9RBmIhpA70D8lLKg' # the real spreadsheet ID will need to be inserted here once it exists


#новы лист под партию
results = service.spreadsheets().batchUpdate(
    spreadsheetId = fifile,
    body =
{
  "requests": [
    {
      "addSheet": {
        "properties": {
          "title": "Листок", #админ ж называет листок
          "gridProperties": {
            "rowCount": 1000,
            "columnCount": 7
          }
        }
Ejemplo n.º 40
0
import gspread
from oauth2client.service_account import ServiceAccountCredentials

# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name(
    'Space-7a5132950347.json', scope)
client = gspread.authorize(creds)

# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
sheet = client.open("Sol").sheet1

# Extract and print all of the values
# get_all_records returns one dict per row, keyed by the header row.
list_of_hashes = sheet.get_all_records()
print(list_of_hashes)
Ejemplo n.º 41
0
###### Login to google account #####
# driver = webdriver.Chrome(c.driver_path)
# driver.get(c.g_link)
# driver.find_element_by_name(c.identifier_element).send_keys(c.gmail_id)
# driver.find_element_by_xpath(c.xpath_next).click()
# driver.implicitly_wait(10)
# driver.find_element_by_name(c.pass_element).send_keys(c.gmail_pass)
# driver.find_element_by_xpath(c.xpath_login).click()

###### Open Google spreadsheet from selenium  ######
# driver.get(c.ss_url)
# time.sleep(4)
# driver.implicitly_wait(15)

## UPDATE DATA FROM SPREADSHEET
# Authorize with the keyfile and scope taken from the config module 'c',
# then write a single cell of the target worksheet.
creds = ServiceAccountCredentials.from_json_keyfile_name(
    c.json_cred_path, c.ss_api)
client = gspread.authorize(creds)
sheet = client.open(c.ss_name).worksheet(c.s_name)
sheet.update_cell(c.update_row_number, c.update_col_number, c.update_value)

###### Open Google spreadsheet from selenium  ######
# driver.get(c.ss_url)
# #driver.implicitly_wait(20)
# time.sleep(15)
# driver.find_element_by_xpath(c.xpath_ss_tools).click()  ##tools
# driver.implicitly_wait(20)
# #'//div[@class="menu-button goog-control goog-inline-block goog-control-open docs-menu-button-open-below" and contains(text(), "<> Script editor")]'
# driver.find_element_by_xpath(c.xpath_ss_tools_script_editor).click()  ##script editor
# #driver.implicitly_wait(20)
# time.sleep(15)
# driver.switch_to_window(driver.window_handles[-1])  ## gs code active window command
Ejemplo n.º 42
0
import configparser
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from monzo import Monzo

# Scopes granting read/write access to Sheets and Drive.
scope = [
    "https://spreadsheets.google.com/feeds",
    'https://www.googleapis.com/auth/spreadsheets',
    "https://www.googleapis.com/auth/drive.file",
    "https://www.googleapis.com/auth/drive"
]

config = configparser.ConfigParser()
config.read('config/config.ini')

creds = ServiceAccountCredentials.from_json_keyfile_name(
    "config/creds.json", scope)
client = gspread.authorize(creds)

# Read the "Finance" worksheet of the "Goals" spreadsheet.
doc = client.open("Goals")
worksheet_list = doc.worksheets()
worksheet = doc.worksheet("Finance")

data = worksheet.get_all_records()

print(data)

# Monzo
# NOTE(review): this rebinds 'client' from the gspread client to the Monzo
# client — confirm the gspread client is no longer needed past this point.
print(config.get('API', 'apikey'))
client = Monzo(
    config.get('API', 'apikey')
)  # Replace access token with a valid token found at: https://developers.monzo.com/
Ejemplo n.º 43
0
import gspread
from oauth2client.service_account import ServiceAccountCredentials

app = Flask(__name__)

# NOTE(review): hard-coded LINE channel access token and secret committed to
# source — move these to environment variables or a config file.
line_bot_api = LineBotApi(
    "ziev+1/ECWJDjw1CkOPjOMofjQ5mft0H0XtZknC/Vu+KnGZzi+2vFVF34UiX+QOdh4JADi+j/xeyPeSiGjyhnvTvKjNijstiixgQeY77aBxJ7R0B8TS/BMCG/y8KheHMwAZ7TJFKN6i5UPBoRzm2BQdB04t89/1O/w1cDnyilFU="
)
handler = WebhookHandler("4088552f2e9ee28de065d9bddce75ab2")

# Sheets/Drive scope for the bot's backing spreadsheet.
scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive'
]
creds = ServiceAccountCredentials.from_json_keyfile_name(
    'fanfloyd1977-2bf294ca8a0e.json', scope)
client = gspread.authorize(creds)


@app.route("/")
def hello():
    """Root endpoint: return a fixed greeting string."""
    greeting = "Hello DAO Flask-Heroku"
    return greeting


@app.route("/callback", methods=["POST"])
def callback():
    signature = request.headers["X-Line-Signature"]
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)

    try:
    yestoday = (today - datetime.timedelta(1)).__format__('%Y-%m-%d')
else:
    yestoday = (datetime.date(today.year, today.month, 1) -
                datetime.timedelta(1)).__format__('%Y-%m-%d')

# Read-only scope for the Google Analytics Reporting API.
SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
KEY_FILE_LOCATION = os.path.abspath(instance.get_key_file_path())
# Map of site short-codes to their Analytics view IDs.
view_id_Dict = {
    'sd': '167419096',
    'gg': '183917273',
    'es': '170634318',
    'mj': '183931009',
    'gj': '92659632',
    'bs': '176331331'
}
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    KEY_FILE_LOCATION, SCOPES)
# Metrics fetched for every view.
metric_list = ['ga:users', 'ga:newUsers', 'ga:sessions']

#Request the data
#Iterate over each site's data view:
for key, viewid in view_id_Dict.items():
    count = 0
    #遍历每个数据视图的指标:
    for metric in metric_list:
        count += 1
        request = build('analyticsreporting', 'v4',
                        credentials=credentials).reports().batchGet(
                            body={
                                'reportRequests': [{
                                    'viewId':
                                    viewid,
Ejemplo n.º 45
0
from engine.AQI import AQImonitor
from engine.gamma import gammamonitor
from engine.SpotifyScrap import scrapSpotify
from engine.crawlerArtical import pttSearch, bballman_news, Spotify_TOP30, rssTechNews, rssNewsLtn, crawerYahoo
from engine.OpenDataTravel import readJsonFilter, showList
from engine.shopWeb import pchome, shopee, momoshop
from engine.ibus import getRoute, getRouteID, showRouteList, showRouteResult
from engine.GoogleMapsURL import googleMapsLat, googleMapsLon
import gspread
from oauth2client.service_account import ServiceAccountCredentials

scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive'
]
creds = ServiceAccountCredentials.from_json_keyfile_name(
    'HappyProgrammer.json', scope)

client = gspread.authorize(creds)

# Worksheets backing the LINE bot's per-user state and profile data.
LineBotSheet = client.open('happy programmer')
userStatusSheet = LineBotSheet.worksheet('userStatus')
userInfoSheet = LineBotSheet.worksheet('userInfo')

app = Flask(__name__)

# Set your Channel Access Token
# NOTE(review): hard-coded LINE credentials committed to source — move to
# environment variables or a config file.
line_bot_api = LineBotApi(
    'kxyKN1dmIBxDNcfm6ZHFkIBbSwpN/inhArVJP6TyBqUXL1S0EmHI5R+DsgRV+GGUNrJxHwgcKi14HcXS3HYGuLYuJrkc5YCF0P/M9Wnpus3afvEi/NqcRVfWOD19LbtKmE9iGbgf5OB38wrRktwnHwdB04t89/1O/w1cDnyilFU='
)
# Set your Channel Secret
handler = WebhookHandler('0d60d38103dc9914b0e3be902c8cf2c2')
Ejemplo n.º 46
0
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = 0.005
    session = tf.Session(config=config)

    #Load strategy_meta
    try:
        #Get Strategy Meta from Google Sheet instead of json
        scope = [
            'https://spreadsheets.google.com/feeds',
            'https://www.googleapis.com/auth/drive'
        ]

        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            '/media/workstation/Storage/GoogleProject/DeepLearningAlphaC.txt',
            scope)
        gc = gspread.authorize(credentials)
        spreadsheet = gc.open("TASK")
        worksheet_list = spreadsheet.worksheets()
        Accepted = spreadsheet.worksheet("Accepted").get_all_records()
        adf = pd.DataFrame(Accepted)
        adf = adf.astype("str")

        #Select a strat, and get strategy_meta from TASK.Accepted sheet
        strategy_meta = adf.loc[adf.Strategy == strat].to_dict(
            orient="records")[0]

        # json_path = os.path.join("STRATEGY_META", "{}.json".format(strat))
        # output_file = open(json_path).read()
        # strategy_meta = json.loads(output_file)
Ejemplo n.º 47
0
                             cursorclass=pymysql.cursors.DictCursor)

# Insert one air-quality sample; commit on success, and always close the DB
# connection (try/finally with no except — failures still propagate).
try:
    with connection.cursor() as cursor:
        sql = "INSERT INTO `airData` (`timeStamp`, `equipament` , `temperature`, `humidity` , `pressure`) VALUES (%s, %s, %s, %s, %s)"
        cursor.execute(sql, (timeStamp, equipament, temperature, humidity, pressure))
    connection.commit()

finally:
    connection.close()

#gsAirData(timeStamp, equipament, temperature, humidity, pressure, gasResistence)

# Open the target Google Sheet by key (first worksheet).
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('/home/pi/Public/dev/IoT2B/MS430/Raspberry_Pi/iot2bv2-ebd620022c11.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open_by_key('1Sjq3HmkMCt6LhME6F9rhteMYT1DlhseJwpMEFZc5qU4')
worksheet = wks.get_worksheet(0)

# Row number read from the counter file; used to address cells below.
# NOTE(review): arqCont is never closed in this section — confirm it is
# closed later, or use a 'with' block.
arqCont = open("/home/pi/Public/dev/IoT2B/MS430/Raspberry_Pi/cont.txt","r")
linha = arqCont.read(100)
print(linha)

# Cell addresses for columns A-F on the target row.
contA = 'A' + str(linha)
contB = 'B' + str(linha)
contC = 'C' + str(linha)
contD = 'D' + str(linha)
contE = 'E' + str(linha)
contF = 'F' + str(linha)
Ejemplo n.º 48
0
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import subprocess

# Dump EC2 instance details (IPs, IDs, type, state, key, tags) as
# tab-separated text via the AWS CLI.
awsinfo = subprocess.check_output(
    'aws ec2 describe-instances --query Reservations[].Instances[].[PrivateIpAddress,PublicIpAddress,InstanceId,SubnetId,InstanceType,State.Name,KeyName,SecurityGroups,LaunchTime,Tags[?Key==`Name`].Value[]] --profile platform --output text',
    shell=True)
SPREAD_SHEET_NAME = "test"

scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive'
]

credentials = ServiceAccountCredentials.from_json_keyfile_name(
    'D:/python/awscli/test-4db795ba475c.json', scope)

gc = gspread.authorize(credentials)

wks = gc.open(SPREAD_SHEET_NAME).sheet1
# wks.delete_row(2)
# print(wks.get_all_records())
# NOTE(review): gspread's Worksheet.append_row requires a list of cell
# values; calling it with no arguments raises TypeError. Presumably the
# parsed awsinfo rows should be passed here — confirm and fix.
wks.append_row()

# wks.update_acell('A1', "Hello, gunman !")
Ejemplo n.º 49
0
#import zabbixgetdata

import gspread
from oauth2client.service_account import ServiceAccountCredentials

# Sheets + Drive scope for the Zabbix report spreadsheet.
scope = [
    'https://spreadsheets.google.com/feeds',
    'https://www.googleapis.com/auth/drive'
]

credentials = ServiceAccountCredentials.from_json_keyfile_name(
    'Zabbix report auto update-83e9f35940ac.json', scope)

gc = gspread.authorize(credentials)
wks = gc.open("Zabbix Report -Test").sheet1

# Smoke test: write one cell, then read and print a cell range.
wks.update_acell('B2', "this should be in b2 cell")

cell_list = wks.range('A1:B7')

print(cell_list)