Code Example #1
def main():
    session = get_db_session()
    credentials = get_credentials()
    gc = gspread.authorize(credentials)

    workbook = gc.open("Hymn Singing Responses")
    wks_sacrament = workbook.worksheet("Sacrament Meeting")
    wks_other = workbook.worksheet("Other Meetings")
    wks_hymns = workbook.worksheet("Hymns")

    # foo = wks_sacrament.get_all_values()
    # print(foo[44])
    # dates = wks_sacrament.col_values(2)
    # print(dates)
    hymnal = wks_hymns.get_all_values()
    for hymn in hymnal:
        if not hymn[0].isdigit():
            continue

        session.add(
            Hymnbook(hymn_number=hymn[0],
                     hymn_name=hymn[1],
                     scriptures=hymn[2],
                     text_author=hymn[3],
                     music_composer=hymn[4],
                     tempo_low=hymn[5],
                     tempo_high=hymn[6],
                     singing_descriptor=hymn[7],
                     meter=hymn[8],
                     tune=hymn[9]))
        # print(hymn)
    print(len(session.query(Hymnbook).all()))
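
The snippet relies on a project-local get_credentials() helper that is not shown. A minimal sketch of such a helper, assuming a service-account JSON key file and the google-auth library (the original project may have used oauth2client instead), could look like this:

# Hypothetical helper, not part of the original project.
from google.oauth2.service_account import Credentials

SCOPES = [
    "https://www.googleapis.com/auth/spreadsheets",
    "https://www.googleapis.com/auth/drive",
]

def get_credentials(key_path="service_account.json"):
    # Load service-account credentials in a form gspread.authorize() accepts.
    return Credentials.from_service_account_file(key_path, scopes=SCOPES)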
Code Example #2
        def post(self):
            credentials = get_credentials(sub=settings.RESELLER_ADMIN)
            http = httplib2.Http()

            credentials.authorize(http)

            service = build(serviceName="reseller",
                            version=settings.RESELLER_API_VERSION,
                            http=http)

            response = service.subscriptions().insert(
                customerId=self.session['domain'],
                body={
                    'customerId': self.session['domain'],
                    'skuId': ResellerSKU.GoogleDriveStorage20GB,
                    'plan': {
                        'planName': ResellerPlanName.Flexible
                    },
                    'seats': {
                        'numberOfSeats': 5,
                        'maximumNumberOfSeats': 5,
                    },
                    'purchaseOrderId': 'G00gl39001-d20'
                }).execute(num_retries=5)

            return self.redirect("/step7")
Code Example #3
class StdOutListener(StreamListener):
    """A listener handles tweets that are received from the stream.
    This listener dumps the tweets into a PubSub topic
    """
    count = 0
    twstring = ''
    tweets = []
    batch_size = 50
    total_tweets = 10000000
    client = utils.create_pubsub_client(utils.get_credentials())

    def write_to_pubsub(self, tw):
        publish(self.client, 'projects/assignment3-276800/topics/assignment3', tw)

    def on_data(self, data):
        """What to do when tweet data is received."""
        self.tweets.append(data)
        if len(self.tweets) >= self.batch_size:
            self.write_to_pubsub(self.tweets)
            self.tweets = []
        self.count += 1
        # if we've grabbed more than total_tweets tweets, exit the script.
        if self.count > self.total_tweets:
            return False
        if (self.count % 1000) == 0:
            print 'count is: %s at %s' % (self.count, datetime.datetime.now())
        return True

    def on_error(self, status):
        print status
Code Example #4
File: zoomer.py  Project: minnaanil/zoomer
def main():
    args = parse_arguments()

    if args.getmousepos:
        utils.getjoinposn()
    elif args.updatepass is not None:
        utils.update_pass(args.updatepass)
    elif args.append is not None:
        utils.append(args.append)
    elif args.changepass is not None:
        utils.change_pass(args.changepass)
    else:
        config = configparser.ConfigParser()
        config.read("data.ini")
        joinposn = config["VALUES"]["join"].split(", ")
        try:
            joinposn = [int(x) for x in joinposn]
        except ValueError:
            # fmt: off
            print("Oops, it looks like you haven't"
                  "set the positon of join button.")
            # fmt: on

        subject = utils.get_subject() if args.subject is None else args.subject
        zoom_id, zoom_pass = utils.get_credentials(subject)
        utils.auto_type(zoom_id, zoom_pass, joinposn)
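
The script reads a data.ini file whose VALUES section stores the join-button position as "x, y". A small illustrative sketch of producing such a file with configparser (the coordinates and everything beyond the keys read above are assumptions):

# Illustrative only: writes data.ini in the shape the script reads above.
import configparser

config = configparser.ConfigParser()
config["VALUES"] = {"join": "640, 480"}  # hypothetical screen coordinates
with open("data.ini", "w") as f:
    config.write(f)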
Code Example #5
File: monstrosity.py  Project: laibel/osisoftpy
    def __init__(self, url='https://dev.dstcontrols.local/piwebapi',
                 verifyssl=True, authtype='kerberos', username=None,
                 password=None):
        # type: (str, bool, str, str, str) -> None

        """

        :rtype: None
        """
        log.info('Instantiating the OSIsoftPy WebAPI with the following '
                 'arguments: URL: %s, VerifySSL: %s, AuthType: %s, '
                 'Username: %s', url, verifyssl, authtype, username)

        self.url = url
        self.verifyssl = verifyssl
        self.authtype = authtype

        log.debug('Creating Requests Session object. VerifySSL: %s, '
                  'AuthType: %s', self.verifyssl, self.authtype)

        self.session = requests.Session()
        self.session.verify = verifyssl
        self.session.auth = get_credentials(authtype=self.authtype,
                                            username=username,
                                            password=password)

        if test_connectivity(self.url, self.session):
            log.info('OSIsoftPy WebAPI instantiation success using %s '
                     'against %s', authtype, self.url)
            self.dataservers = TypedList(validtypes=DataArchive)
        else:
            log.error(
                'OSIsoftPy WebAPI instantiation failed using %s against '
                '%s', authtype, self.url, exc_info=True)
Code Example #6
        def post(self):

            credentials = get_credentials(settings.RESELLER_ADMIN)

            http = httplib2.Http()
            credentials.authorize(http)

            service = build(serviceName="reseller",
                            version=settings.RESELLER_API_VERSION,
                            http=http)

            response = service.subscriptions().insert(
                customerId=self.session['domain'],
                body={
                    'customerId': self.session['domain'],
                    'subscriptionId': "%s-apps" % self.session['domain'],
                    'skuId': ResellerSKU.GoogleApps,
                    'plan': {
                        'planName': ResellerPlanName.Flexible,
                        'isCommitmentPlan': False,
                    },
                    'seats': {
                        'numberOfSeats': self.request.get("seats"),
                        'maximumNumberOfSeats': self.request.get("seats")
                    },
                    'renewalSettings': {
                        'renewalType': ResellerRenewalType.PayAsYouGo
                    },
                    'purchaseOrderId': 'G00gl39001'
                }).execute(num_retries=5)

            return self.redirect("/step3")
Code Example #7
class StdOutListener(StreamListener):
    count = 0
    twstring = ''
    tweets = []
    batch_size = 50  # how many tweets to pull per request
    total_tweets = 10000000  # stop once total_tweets is reached
    client = utils.create_pubsub_client(utils.get_credentials())

    def write_to_pubsub(self, tw):
        publish(self.client, 'projects/sd-3-241301/topics/twitter', tw)

    def on_data(self, data):
        self.tweets.append(data)
        if len(self.tweets) >= self.batch_size:
            self.write_to_pubsub(self.tweets)
            self.tweets = []
        self.count += 1
        if self.count > self.total_tweets:
            return False
        if (self.count % 1000) == 0:
            print 'count is: %s at %s' % (self.count, datetime.datetime.now())
        return True

    def on_error(self, status):
        print status
Code Example #8
def replicate(db_url, username, friend_name, friend_db_url):
    auth_filters = get_credentials()
    db = Database(db_url, filters=auth_filters)
    replicator_db = db.server['_replicator']

    # this describes the replication task
    replication_doc = {
        "source": friend_db_url,
        "target": db_url,
        "continuous": True,
        "filter": "things/from_friend",
        "query_params": {
            "friend": friend_name,
        }
    }

    # we try to delete an existing replication with the same Id
    # this would stop the replication
    try:
        del replicator_db[friend_name]
    except ResourceNotFound:
        pass

    # we store the replication task, which will automatically start it
    replication_id = '{src}-{target}'.format(src=friend_name, target=username)
    replicator_db[replication_id] = replication_doc
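
Here get_credentials() returns a list of request filters passed to couchdbkit's Database. A minimal sketch under the assumption that the server uses HTTP basic auth via restkit (the real credential source is not shown):

# Hypothetical helper; assumes couchdbkit with restkit basic-auth filters.
from restkit import BasicAuth

def get_credentials(username="admin", password="secret"):
    # Return the filter list expected by Database(db_url, filters=...).
    return [BasicAuth(username, password)]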
Code Example #9
File: heartbeat.py  Project: FR4NK-W/scion-box-1
def request_server(ia_list):
    """
    Communicate with the SCION coordination server over HTTPS.
    Call the Heartbeat API: send a POST request to the SCION coordinator
    and receive the list of current neighbors.
    :returns dict current_neighbors:
    """
    credentials = utils.get_credentials()
    POST_REQ = SCION_COORD_URL + "api/as/heartbeat/" + credentials["ID"] + "/" + credentials["SECRET"]
    # TODO send some status info to the SCION coord
    IAList = []
    for ia in ia_list:
        list = utils.assemble_current_br_list(ia)
        IA = {'ISD': ia._isd, 'AS': ia._as, 'Connections': list}
        IAList.append(IA)
    ip_address = ni.ifaddresses(INTERFACE)[ni.AF_INET][0]['addr']
    # Send the list of current connections as well as the UserMail, the IA of the SCIONLab AS, and the IP address.
    HeartBeatQuery = {'IAList': IAList, 'UserMail' : credentials["UserMail"], 'IP': ip_address, 'Time': time.time()}
    logging.info("Calling HB API at: %s, with json: %s", POST_REQ, HeartBeatQuery)
    try:
        resp = requests.post(POST_REQ, json=HeartBeatQuery, timeout=10)
    except requests.exceptions.RequestException as e:
        return None, e
    if resp.status_code == 200:
        return resp, None
    else:
        logging.error("[ERROR] Wrong Status Code ! %s", resp.status_code)
        exit(1)
Code Example #10
 def get_session(self):
     if not self._session:
         # Lazy load the session
         import utils
         credentials = utils.get_credentials()
         self._session = get_session(credentials["username"], credentials["password"])
     return self._session
Code Example #11
class StdOutListener(StreamListener):
    """A listener handles tweets that are received from the stream.
    This listener dumps the tweets into a PubSub topic
    """

    count = 0
    twstring = ''
    tweets = []
    batch_size = 50
    total_tweets = 10000000
    client = utils.create_pubsub_client(utils.get_credentials())

    def write_to_pubsub(self, tw):
        publish(self.client, PUBSUB_TOPIC, tw)

    def on_data(self, data):
        """What to do when tweet data is received."""
        self.tweets.append(data)
        if len(self.tweets) >= self.batch_size:
            self.write_to_pubsub(self.tweets)
            self.tweets = []
        self.count += 1
        # if we've grabbed more than total_tweets tweets, exit the script.
        # If this script is being run in the context of a kubernetes
        # replicationController, the pod will be restarted fresh when
        # that happens.
        if self.count > self.total_tweets:
            return False
        if (self.count % 1000) == 0:
            print 'count is: %s' % self.count
        return True

    def on_error(self, status):
        print status
Code Example #12
File: app.py  Project: shantanu69/mail_power
def main():
    """ The entry point """

    client = input("client name : ")

    server = IMAP4_SSL('imap.gmail.com')
    usr, pass_ = utils.get_credentials('data/credentials.dat')  # collect the authentication data from data/credentials.dat
    stat = server.login(usr, pass_)[0]
    if stat == 'OK':
        print("Listening for mails......")
        try:
            while True:
                server.select('Inbox') # select the inbox
                mails = get_recent_mails(server, client)
                if mails != ['']:
                    for mail in mails:
                        res = server.fetch(mail.encode(), '(UID BODY[TEXT])')
                        msg_body = res[1][0][1].decode().strip("\r\n")
                        parse_n_send_command(msg_body)
                time.sleep(5)
        except KeyboardInterrupt:
            print("Done Listening...")

        server.close()
        server.logout()
    else:
        pass
    return
Code Example #13
class StdOutListener(StreamListener):
    """A listener handles tweets that are received from the stream.
    This listener dumps the tweets into a PubSub topic
    """
    count = 0
    twstring = ''
    tweets = []
    batch_size = 50
    total_tweets = 100000
    client = utils.create_pubsub_client(utils.get_credentials())

    def on_status(self, data):
        write_to_pubsub(reformat_tweet(data._json))
        self.count += 1
        # if we've grabbed more than total_tweets tweets, exit the script.
        if self.count > self.total_tweets:
            return False
        return True

    '''
    def on_data(self, data):
        """What to do when tweet data is received."""
        self.tweets.append(data)
        if len(self.tweets) >= self.batch_size:
            write_to_pubsub(reformat_tweet(data._json))
            self.tweets = []
        if self.count > self.total_tweets:
            return False
        if (self.count % 1000) == 0:
            print('count is: %s at %s' % (self.count, datetime.now()))
        return True
    '''

    def on_error(self, status):
        print(status)
Code Example #14
def replicate(db_url, username, friend_name, friend_db_url):
    auth_filters = get_credentials()
    db = Database(db_url, filters=auth_filters)
    replicator_db = db.server['_replicator']

    # this describes the replication task
    replication_doc = {
        "source": friend_db_url,
        "target": db_url,
        "continuous": True,
        "filter": "things/from_friend",
        "query_params": {
            "friend": friend_name,
        }
    }

    # we try to delete an existing replication with the same Id
    # this would stop the replication
    try:
        del replicator_db[friend_name]
    except ResourceNotFound:
        pass

    # we store the replication task, which will automatically start it
    replication_id = '{src}-{target}'.format(src=friend_name, target=username)
    replicator_db[replication_id] = replication_doc
Code Example #15
def main(argv):
    """Main routine."""

    options = process_args(argv)
    logging.basicConfig(level=logging.INFO)
    logger.info('Neutron script starts with the arguments %s' % options)
    options.creds = utils.get_credentials()
    run(options)
Code Example #16
def send_email(wet_status):
    """Gets the credentials from a json file and send the alert."""
    creds = get_credentials()
    server = smtplib.SMTP(creds.get("SMTP_HOST"), 587)
    server.login(creds.get("SMTP_LOGIN"), creds.get("SMTP_PASSWORD"))
    msg = MESSAGE[wet_status]
    for receiver in creds.get("RECEIVERS_EMAILS"):
        server.sendmail(creds.get("SENDER_EMAIL"), receiver, msg)
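
The docstring says the credentials come from a JSON file. A plausible sketch of that helper, where only the key names are taken from the .get() calls above and everything else (file name, structure) is assumed:

# Hypothetical helper matching the keys used above (SMTP_HOST, SMTP_LOGIN, ...).
import json

def get_credentials(path="credentials.json"):
    # Load the mail settings dict from a JSON file.
    with open(path) as f:
        return json.load(f)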
Code Example #17
    def post(self):
        domain = self.request.get("domain")
        logging.info("Execing cleanup task for domain (%s)" % domain)

        http = httplib2.Http()
        httplib2.debuglevel = 4
        credentials = get_credentials(settings.RESELLER_ADMIN)
        credentials.authorize(http)

        service = build("reseller", settings.RESELLER_API_VERSION, http=http)

        response = service.customers().get(
            customerId=domain).execute(num_retries=5)

        def delete_sub_callback(request_id, response, exception):
            # just log the exception.
            logging.exception(exception)
            pass

        if not response.get("alternateEmail"):
            logging.info("Skipping cleanup, customer not resold..")
            exit()

        response = service.subscriptions().list(
            customerId=domain,
            maxResults=100).execute(num_retries=5)

        # resort the subscriptions and bump GAFB subs to the bottom
        subs = sorted(
            response['subscriptions'],
            cmp=lambda a, b: int(a['skuId'] == ResellerSKU.GoogleApps) - 1)

        batch = BatchHttpRequest(callback=delete_sub_callback)

        logging.info("Purging %d subs" % len(subs))

        for s in subs:
            if s['status'] in [ResellerDeletionType.Cancel,
                               ResellerDeletionType.Suspend,
                               ResellerDeletionType.Downgrade]:
                logging.info("Skipping subscription, in deleted state")
                continue

            # Google-Drive-storage / Google-Vault must be cancelled.
            deletionType = ResellerDeletionType.Cancel

            # GAfB cannot be 'cancelled', and must be 'suspended'
            if s['skuId'] == ResellerSKU.GoogleApps:
                deletionType = ResellerDeletionType.Suspend

            request = service.subscriptions().delete(
                customerId=domain,
                subscriptionId=s['subscriptionId'],
                deletionType=deletionType)

            batch.add(request)

        batch.execute(http=http)
Code Example #18
 def __init__(self):
     creds = get_credentials()
     tapi = TuyaApi()
     tapi.init(
         username=creds.get("TUYA_USERNAME"),
         password=creds.get("TUYA_PASSWORD"),
         countryCode=creds.get("TUYA_LOCATION")
     )
     self.device = tapi.get_device_by_id(creds.get("TUYA_DEVICE_ID"))
Code Example #19
def main():
    env = sys.argv[1]
    db_details = get_credentials(env)
    tables = get_tables("tables_to_be_loaded.txt")
    for table in tables:
        print("Discovering data...")
        data, cols = read_table(db_details, table)
        print(f"Table: {table} | {len(data)} rows")
        print("Writing data...")
        write_table(db_details, table, data, cols)
        print("Process completed")
Code Example #20
def publish(pubsub_topic, data_lines):
    """Publish to the given pubsub topic."""
    messages = []
    for line in data_lines:

        pub = base64.urlsafe_b64encode(str(line))
        messages.append({'data': pub})

    body = {'messages': messages}
    client = utils.create_pubsub_client(utils.get_credentials())
    resp = client.projects().topics().publish(
        topic=pubsub_topic, body=body).execute(num_retries=NUM_RETRIES)
    return resp
Code Example #21
def replication_status(db_url):
    auth_filters = get_credentials()
    db = Database(db_url, filters=auth_filters)
    server = db.server

    # print a nice header
    header = '{:>10s} {:35s} => {:35s} {:>5s} {:>6s}'.format('Id', 'Source',
            'Target', 'Docs', 'Prog.')
    print header
    print '=' * len(header)

    # /_active_tasks has information about all running tasks (indexers,
    # replication, etc). We use it to get progress info for active
    # replication tasks
    for task in server.active_tasks():
        if task.get('type', None) != 'replication':
            continue

        print '{:>10s} {:35s} => {:35s} {:5d} {:5d}%'.format(
                task.get('doc_id', ''),
                task.get('source', ''),
                task.get('target', ''),
                task.get('docs_written', 0),
                task.get('progress', 0)
            )


    # For information about failed replications (eg filter does not exist
    # at the source, or the source does not exist at all), we have to look
    # into the documents in the /_replicator database
    replicator_db = server['_replicator']

    for result in replicator_db.view('_all_docs', include_docs=True):

        # we're not interested in design documents
        if result['id'].startswith('_design/'):
            continue

        doc = result['doc']

        # all active (non-error) replication tasks have already been printed
        # above; we're only interested in those that failed
        if doc.get('_replication_state', None) != 'error':
            continue

        print '{:>10s} {:35s} => {:35s} {:>12s}'.format(
                result['id'],
                doc.get('source', ''),
                doc.get('target', ''),
                doc.get('_replication_state', '')
            )
Code Example #22
class StdOutListener(StreamListener):
    """A listener handles tweets that are received from the stream.
    This listener dumps the tweets into a PubSub topic
    """

    count = 0
    twstring = ''
    tweets = []
    batch_size = 50
    total_tweets = 10000000
    client = utils.create_pubsub_client(utils.get_credentials())
    print 'in stdoutlistener'

    def write_to_pubsub(self, tw):
        publish(self.client, PUBSUB_TOPIC, tw)

    def on_data(self, data):
        """What to do when tweet data is received."""

        pub_data = {}

        all_data = json.loads(data)

        pub_data["tweet"] = all_data["text"]
        pub_data["username"] = all_data["user"]["screen_name"]
        pub_data["userlocation"] = all_data["user"]["location"]
        pub_data["retweetcount"] = all_data["retweet_count"]
        pub_data["favoritecount"] = all_data["favorite_count"]

        pass_data = json.dumps(pub_data)

        self.tweets.append(pass_data)
        if len(self.tweets) >= self.batch_size:
            self.write_to_pubsub(self.tweets)
            self.tweets = []
        self.count += 1
        # if we've grabbed more than total_tweets tweets, exit the script.
        # If this script is being run in the context of a kubernetes
        # replicationController, the pod will be restarted fresh when
        # that happens.
        if self.count > self.total_tweets:
            return False
        if (self.count % 1000) == 0:
            print 'count is: %s at %s' % (self.count, datetime.datetime.now())
        return True

    def on_error(self, status):
        print status
Code Example #23
File: streamer.py  Project: RobDavis/geotweets
def start_stream(q, bounding_box, fn='tweets.json', search_terms=None):
    '''Takes in a Queue object, a bounding_box of [lon, lat, lon, lat] for
    SW and NE corners, a filename and a search term list. Examples in:
    bounding_box = geo_converter.get_bounding_box_from(g)
    search_terms = geo_converter.get_search_terms_from(g)
    '''
    global stream
    (__, auth) = utils.get_credentials("consumerkeyandsecret", False)
    L = ListenerQueue(q, fn, search_terms)
    stream = tweepy.Stream(auth, L)
    stream.filter(locations=bounding_box, filter_level='none', async=True)
    # if search_terms:
    #     # OR semantics:
    #     stream.filter(locations=bounding_box, track=search_terms, async=True)
    # else:
    #     stream.filter(locations=bounding_box, async=True)
    return stream
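
utils.get_credentials("consumerkeyandsecret", False) evidently returns a pair whose second element is a tweepy auth handler. A rough sketch under that assumption, reading four OAuth values from a text file (the file layout and the meaning of the second argument are guesses; the flag is ignored here):

# Hypothetical helper; assumes a text file with four lines:
# consumer key, consumer secret, access token, access token secret.
import tweepy

def get_credentials(keys_file, _flag=False):
    with open(keys_file) as f:
        consumer_key, consumer_secret, access_token, access_secret = \
            [line.strip() for line in f][:4]
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    # Return both an API object and the bare auth handler, matching the
    # (api, auth) / (__, auth) unpacking seen in these examples.
    return tweepy.API(auth), auth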
Code Example #24
File: streamer.py  Project: owenst/geotweets
def start_stream(q, bounding_box, fn='tweets.json', search_terms=None):
    '''Takes in a Queue object, a bounding_box of [lon, lat, lon, lat] for
    SW and NE corners, a filename and a search term list. Examples in:
    bounding_box = geo_converter.get_bounding_box_from(g)
    search_terms = geo_converter.get_search_terms_from(g)
    '''
    global stream
    (__, auth) = utils.get_credentials("consumerkeyandsecret", False)
    L = ListenerQueue(q, fn, search_terms)
    stream = tweepy.Stream(auth, L)
    stream.filter(locations=bounding_box, filter_level='none', async=True)
    # if search_terms:
    #     # OR semantics:
    #     stream.filter(locations=bounding_box, track=search_terms, async=True)
    # else:
    #     stream.filter(locations=bounding_box, async=True)
    return stream
Code Example #25
def init_db(dburl):
    print 'Initializing', dburl

    print 'Authenticating'
    filters = get_credentials()

    db = Database(dburl, filters=filters)
    server = db.server

    try:
        server.delete_db(db.dbname)
        print 'Deleting', db.dbname

    except ResourceNotFound:
        pass

    db = server.get_or_create_db(db.dbname)
    print 'Created', db.dbname
Code Example #26
        def post(self):
            credentials = get_credentials(sub=settings.RESELLER_ADMIN)
            http = httplib2.Http()

            credentials.authorize(http)

            service = build(serviceName="licensing",
                            version='v1',
                            http=http)

            service.licenseAssignments().insert(
                productId=ResellerProduct.GoogleDrive,
                skuId=ResellerSKU.GoogleDriveStorage20GB,
                body={
                    'userId': 'admin@%s' % self.session['domain']
                }).execute(num_retries=5)

            return self.render_template("templates/fin.html")
Code Example #27
def init_db(dburl):
    print 'Initializing', dburl

    print 'Authenticating'
    filters = get_credentials()

    db = Database(dburl, filters=filters)
    server = db.server

    try:
        server.delete_db(db.dbname)
        print 'Deleting', db.dbname

    except ResourceNotFound:
        pass

    db = server.get_or_create_db(db.dbname)
    print 'Created', db.dbname
Code Example #28
        def post(self):
            '''
            Call the site verification api and fetch the token value.
            '''
            credentials = get_credentials(settings.RESELLER_ADMIN)

            http = httplib2.Http()
            credentials.authorize(http)

            # establish default values.
            verification_type = "INET_DOMAIN"
            identifier = self.session['domain']
            verification_method = self.request.get("verificationMethod")

            # Does the requested verification method fall into the "site" type?
            if verification_method in settings.SITE_VERIFICATION_METHODS:
                # a "site" type is chosen, the values are a different.
                verification_type = "SITE"
                # site verification methods must begin with http or https
                identifier = "http://%s" % self.session['domain']

            # build the site verification service.
            service = build(serviceName="siteVerification",
                            version="v1",
                            http=http)

            # fetch a verification token.
            response = service.webResource().getToken(body={
                'site': {
                    'type': verification_type,
                    'identifier': identifier
                },
                'verificationMethod': verification_method
            }).execute(num_retries=5)

            return self.render_template("templates/step3_confirm.html",
                                        verification_token=response['token'],
                                        verification_type=verification_type,
                                        verification_method=verification_method,
                                        verification_identifier=identifier)
Code Example #29
def main():
    args = get_args()
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    credentials = utils.get_credentials(args.config)
    session = http_session.ZephyrSession(username=credentials['username'],
                                         password=credentials['password'])

    if args.verbose:
        LOGGER.debug("API version: %s", session.version['version'])

    # Build time range for specified day
    start_time = datetime.datetime.combine(
        date=args.date,
        time=datetime.time.min).replace(tzinfo=datetime.timezone.utc)
    end_time = datetime.datetime.combine(
        date=args.date + datetime.timedelta(days=1),
        time=datetime.time.min).replace(tzinfo=datetime.timezone.utc)

    rows = sort(
        get_data(session=session, start_time=start_time, end_time=end_time))
    write_csv(path=args.output, rows=rows)
Code Example #30
        def post(self):
            credentials = get_credentials(sub=settings.RESELLER_ADMIN)

            username = "******" % self.session['domain']
            password = "******"

            http = httplib2.Http()
            credentials.authorize(http)

            service = build(serviceName="admin",
                            version="directory_v1",
                            http=http)

            # create the user.
            service.users().insert(body={
                'primaryEmail': username,
                'name': {
                    'givenName': 'Admin',
                    'familyName': 'Admin',
                    'fullName': 'Admin Admin'
                },
                'isAdmin': True,
                'suspended': False,
                'password': password
            }).execute(num_retries=5)

            # make the user a super admin.
            service.users().makeAdmin(
                userKey=username,
                body={
                    'status': True
                }).execute(num_retries=5)

            self.session['username'] = username

            return self.render_template("templates/step5_confirm.html",
                                        domain=self.session['domain'],
                                        username=username,
                                        password=password)
Code Example #31
    def post(self):
        domain = self.request.get('domain')

        credentials = get_credentials(settings.RESELLER_ADMIN)

        http = httplib2.Http()
        credentials.authorize(http)

        service = build(serviceName="reseller",
                        version=settings.RESELLER_API_VERSION,
                        http=http)

        response = service.customers().insert(body={
            'customerDomain': domain,
            'alternateEmail': '*****@*****.**',
            'phoneNumber': '212.565.0000',
            'postalAddress': {
                'contactName': "A Googler",
                'organizationName': 'Google, Inc',
                'locality': 'New York City',
                'countryCode': 'US',
                'region': 'NY',
                'postalCode': '10011',
                'addressLine1': '76 9th Ave'
            }
        }).execute(num_retries=5)

        self.session['domain'] = domain

        # Mark the domain for deletion in approx 5 days.
        taskqueue.add(url="/tasks/cleanup",
                      name="cleanup__%s" % domain.replace(".", "_"),
                      countdown=settings.DOMAIN_CLEANUP_TIMER,
                      params={
                          'domain': domain
                      })

        return self.redirect("/step2")
Code Example #32
File: geosearchclass.py  Project: owenst/geotweets
    def search(self):
        '''Perform a geolocated search using the class attributes
        'search_term', 'result_type', 'count', and 'geo_string'.

        Requires an api object as returned by the tweepy module.

        USAGE:
        search_results = search(api)
        
        See: http://docs.tweepy.org/en/v3.5.0/api.html#API.search
        '''
        if not self.credits_retrieved:
            (self.api, __) = utils.get_credentials(self.keys_file, True)
            self.credits_retrieved = True
        geo_string = getattr(self, "geo_string")
        if self._geo_string is None:
            raise Exception("initialize geo string")
        search_results = self.api.search(q=self._search_term,
                                         geocode=geo_string,
                                         result_type=self._result_type,
                                         count=self._count)
        self.search_results = search_results
        return self.search_results
Code Example #33
File: metadata.py  Project: rcgsheffield/urban_flows
def main():
    parser, args = get_args()
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    if args.sites or args.sensors:

        credentials = utils.get_credentials(args.config)
        session = http_session.ZephyrSession(username=credentials['username'], password=credentials['password'])

        for internal_device_id, device in session.devices.items():
            LOGGER.debug("Device ID %s (%s)", internal_device_id, device['zNumber'])

            if args.sites:
                asset = device_to_site(device)
            else:
                for slot in {'A', 'B'}:
                    asset = device_to_sensor(device, slot=slot)

            print(asset)


    else:
        parser.print_help()
Code Example #34
    def search(self):
        '''Perform a geolocated search using the class attributes
        'search_term', 'result_type', 'count', and 'geo_string'.

        Requires an api object as returned by the tweepy module.

        USAGE:
        search_results = search(api)
        
        See: http://docs.tweepy.org/en/v3.5.0/api.html#API.search
        '''
        if not self.credits_retrieved:
            (self.api, __) = utils.get_credentials(self.keys_file, True)
            self.credits_retrieved = True
        geo_string = getattr(self, "geo_string")
        if self._geo_string is None:
            raise Exception("initialize geo string")
        search_results = self.api.search(q=self._search_term,
                                         geocode=geo_string,
                                         result_type=self._result_type,
                                         count=self._count)
        self.search_results = search_results
        return self.search_results
Code Example #35
def main():
    parser = get_parser()
    args = parser.parse_args()

    if args.doc:
        print __doc__
        import sys
        sys.exit(0)

    # pass in an API to GeoSearchClass to get full access for posting
    (api, __) = utils.get_credentials('consumerkeyandsecret', False)
    g = geosearchclass.GeoSearchClass('params.txt', None, api)

    if args.filename:
        print 'Using parameters from ' + str(args.filename)
        g.set_params_from_file(args.filename)
    else:
        print "Using search values from params.txt"
        g.set_params_from_file('params.txt')

    if args.output:
        fn = str(args.output)
    else:
        fn = 'tweets.json'
    print 'Output file: ' + fn

    if args.address:
        print "Finding geocoordates for address:\n{}".format(args.address)
        coords = geo_converter.get_geocoords_from_address(args.address)
        if coords:
            g.latitude = coords[0]
            g.longitude = coords[1]
        else:
            print "Failed to find coordinates"
            sys.exit()

    verify(g, fn)
Code Example #36
File: scan_and_respond.py  Project: owenst/geotweets
def main():
    parser = get_parser()
    args = parser.parse_args()

    if args.doc:
        print __doc__
        import sys
        sys.exit(0)

    # pass in an API to GeoSearchClass to get full access for posting
    (api, __) = utils.get_credentials('consumerkeyandsecret', False)
    g = geosearchclass.GeoSearchClass('params.txt', None, api)

    if args.filename:
        print 'Using parameters from ' + str(args.filename)
        g.set_params_from_file(args.filename)
    else:
        print "Using search values from params.txt"
        g.set_params_from_file('params.txt')

    if args.output:
        fn = str(args.output)
    else:
        fn = 'tweets.json'
    print 'Output file: ' + fn

    if args.address:
        print "Finding geocoordates for address:\n{}".format(args.address)
        coords = geo_converter.get_geocoords_from_address(args.address)
        if coords:
            g.latitude = coords[0]
            g.longitude = coords[1]
        else:
            print "Failed to find coordinates"
            sys.exit()

    verify(g, fn)
Code Example #37
        def get(self):
            '''
            Call the site verification service and see if the
            token has been fulfilled (e.g. a dns entry added)
            '''

            credentials = get_credentials(settings.RESELLER_ADMIN)

            http = httplib2.Http()
            credentials.authorize(http)

            service = build(serviceName="siteVerification",
                            version="v1",
                            http=http)

            verification_type = self.request.get("verification_type")
            verification_ident = self.request.get("verification_identifier")
            verification_method = self.request.get("verification_method")

            verification_status = None
            try:
                # try to do a verification,
                # which will test the method on the Google server side.
                service.webResource().insert(
                    verificationMethod=verification_method,
                    body={
                        'site': {
                            'type': verification_type,
                            'identifier': verification_ident
                        },
                        'verificationMethod': verification_method
                    }
                ).execute(num_retries=5)
                verification_status = True
            except Exception, e:
                verification_status = False
Code Example #38
                        continue
                    tweets.append(mtweet)
            else:
                # pause before checking again
                print 'sleeping...'
                time.sleep(WAIT)
        response = utils.bq_data_insert(bigquery, PROJECT_ID, os.environ['BQ_DATASET'],
                             os.environ['BQ_TABLE'], tweets)
        tweets = []
        count += 1
        if count % 25 == 0:
            print ("processing count: %s of %s at %s: %s" %
                   (count, count_max, datetime.datetime.now(), response))


if __name__ == '__main__':
    topic_info = PUBSUB_TOPIC.split('/')
    topic_name = topic_info[-1]
    sub_name = "tweets-%s" % topic_name
    print "starting write to BigQuery...."
    credentials = utils.get_credentials()
    bigquery = utils.create_bigquery_client(credentials)
    pubsub = utils.create_pubsub_client(credentials)
    try:
        # TODO: check if subscription exists first
        subscription = create_subscription(pubsub, PROJECT_ID, sub_name)
    except Exception, e:
        print e
    write_to_bq(pubsub, sub_name, bigquery)
    print 'exited write loop'
Code Example #39
def test():
    from utils import get_credentials
    credentials = get_credentials()
    module_dict = get_module_dict(*credentials)
    module_window = ModuleWindow(module_dict, credentials)
    module_window.mainloop()
Code Example #40
    ['COMP1081/PRAC/002', '20', 'Algorithms and Data Structures Practical', '11-20, 25-33, 39-41', 'COMPPGSTAFF04', 'D/CM003']
    ['COMP1081/PRAC/001', '30', 'Algorithms and Data Structures Practical', '11-20, 25-33, 39-41', 'COMPPGSTAFF04', 'D/CG66']
    ['COMP1081/LECT/002', '60', 'Algorithms and Data Structures Lecture', '11-20, 25-33, 39-41', 'Ivressimtzis, Dr I, Johnson, Dr M', 'D/E101']
    ['COMP2181/LECT/001', '50', 'Theory of Computation Lecture', '11-20, 25-33, 39-41', 'Dantchev, Dr S S, Gadouleau, Dr M, Mertzios, Dr G', 'D/CM101']
    ['COMP2181/LECT/002', '50', 'Theory of Computation Lecture', '11-20, 25-33, 39-41', 'Dantchev, Dr S S, Gadouleau, Dr M, Mertzios, Dr G', 'D/W309']
    ['COMP2181/PRAC/001', '25', 'Theory of Computation Practical', '11-20, 25-33, 39-41', 'COMPPGSTAFF06', 'D/PCL050']
    ['COMP2181/PRAC/002', '25', 'Theory of Computation Practical', '11-20, 25-33, 39-41', 'COMPPGSTAFF06', 'D/E101']
    ['COMP2191/WORK/001', '55', 'Software Engineering Workshop', '11-20, 25-33', 'Drummond, Dr S A', 'D/E102']
    ['COMP2191/LECT/001', '55', 'Software Engineering Lecture', '11-20, 25-33, 39-41', 'Drummond, Dr S A, Budgen, Prof D', 'D/E102']
    ['COMP2191/PRAC/001', '48', 'Software Engineering Practical', '11, 13, 15, 17, 19, 25, 27, 29, 31, 33', 'Drummond, Dr S A, COMPPGSTAFF07', 'D/E216A']
    ['COMP2191/LECT/002', '55', 'Software Engineering Lecture', '11-20, 25-33, 39-41', 'Drummond, Dr S A, Budgen, Prof D', 'D/E102']
    ['COMP2191/WORK/001', '55', 'Software Engineering Workshop', '11-20, 25-33', 'Drummond, Dr S A', 'D/E102']
    ['COMP2191/LECT/001', '55', 'Software Engineering Lecture', '11-20, 25-33, 39-41', 'Drummond, Dr S A, Budgen, Prof D', 'D/E102']
    ['COMP2191/LECT/002', '55', 'Software Engineering Lecture', '11-20, 25-33, 39-41', 'Drummond, Dr S A, Budgen, Prof D', 'D/E102']
    ['COMP2191/LECT/001', '55', 'Software Engineering Lecture', '11-20, 25-33, 39-41', 'Drummond, Dr S A, Budgen, Prof D', 'D/E102']
    ['COMP2191/LECT/002', '55', 'Software Engineering Lecture', '11-20, 25-33, 39-41', 'Drummond, Dr S A, Budgen, Prof D', 'D/E102']
    ['COMP2201/LECT/001', '40', 'Group Project Lecture', '11-20, 25, 28-33', 'Obara, Dr B, Drummond, Dr S A', 'D/W205']
    ['COMP2201/PRAC/001', '40', 'Group Project Practical', '11-20, 25, 28-33, 39', 'Mertzios, Dr G, Drummond, Dr S A, COMPPGSTAFF07', 'D/E216A']
    ['COMP2201/LECT/002', '40', 'Group Project Lecture', '11-20, 25-33', 'Obara, Dr B, Drummond, Dr S A', 'D/E102']
    ['COMP2201/ADD/001', '40', 'Group Project Lecture', '26-27', 'D/EH202']"""


credentials = get_credentials()

modules = getmodules.get_module_dict(credentials[0], credentials[1])
module_codes = [x.split(" - ")[0] for x in modules['COMP'][0:6]]
schedule_items = parse_timetable.get_schedule_items(module_codes, credentials)

Code Example #41
File: start.py  Project: nsfyn55/apa-stat-scraper
import requests
import bs4
import utils

#get credentials
username, password = utils.get_credentials()

# get a response body from initial request
session = requests.Session()
resp = session.get("https://members.poolplayers.com")

# ------ Process Main Page Response ---------
soup = bs4.BeautifulSoup(resp.text, 'html.parser')
payload = utils.get_hidden_fields(soup)

# Decorate with Login Specific Fields
payload["DES_Group"] = ''
payload["DES_JSE"] = 1
payload["ctl00$cplhPublicContent$Login1$txtUserID"] = username
payload["ctl00$cplhPublicContent$Login1$txtPassword"] = password
payload["ctl00$cplhPublicContent$Login1$btnLogin"] = ""

resp = session.post('https://members.poolplayers.com/Default.aspx', data=payload)
payload = {}

# ------ Process Login Response ---------
soup = bs4.BeautifulSoup(resp.text, 'html.parser')
payload = utils.get_hidden_fields(soup)

# Get League Selection
league_select_box = soup.find('select', {"name":"ddlSelectedLeague"})
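
utils.get_hidden_fields() (used twice above) is a project helper that is not shown; it presumably gathers the ASP.NET hidden form fields into a POST payload. A minimal sketch of such a helper, given a bs4.BeautifulSoup object (name and behaviour are assumptions):

# Hypothetical helper: collect all hidden <input> fields into a form payload.
def get_hidden_fields(soup):
    return {
        tag.get("name"): tag.get("value", "")
        for tag in soup.find_all("input", type="hidden")
        if tag.get("name")
    }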
Code Example #42
File: upload_data.py  Project: Svetttta/test_gcp_func
import os

from google.cloud import storage

from config import PROJECT_ID, INPUT_BUCKET, SERVICE_CREDENTIALS, root_path
from utils import get_credentials

DATA_DIR = 'data'


def upload_files(gcp_bucket, source_folder):
    """Upload files to GCP bucket."""
    data_path = os.path.join(root_path, source_folder)
    files_list = os.listdir(data_path)

    for file_name in files_list:
        file_path = os.path.join(data_path, file_name)

        if os.path.isfile(file_path):
            blob = gcp_bucket.blob(file_name)
            blob.upload_from_filename(file_path)


if __name__ == '__main__':
    client = storage.Client(project=PROJECT_ID, credentials=get_credentials(SERVICE_CREDENTIALS))
    bucket = client.get_bucket(INPUT_BUCKET)
    upload_files(bucket, DATA_DIR)
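
get_credentials(SERVICE_CREDENTIALS) must yield a credentials object that storage.Client accepts. A plausible sketch, assuming SERVICE_CREDENTIALS is the path to a service-account JSON key file:

# Hypothetical helper; assumes SERVICE_CREDENTIALS is a service-account key path.
from google.oauth2 import service_account

def get_credentials(key_path):
    return service_account.Credentials.from_service_account_file(key_path)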
Code Example #43
'''
This script's job is to identify all data where MIDPOINT REGISTRATION is needed
1. CHECK IF DONE (QUERY analysis/midpoint/) to see if midpoint T1 images created using: SPM12, ANTS and NIFTY_REG exist.
2. CHECK IF PREREQ IS THERE: T1 baseline and T1 followup
3. ADD TO LIST, remove duplicates and print
'''
import requests
import csv
import datetime as dt
import os
from utils import refresh_cookies, get_credentials

requests.packages.urllib3.disable_warnings()
xnat_host = 'https://nimg1946.cs.ucl.ac.uk'
username, login, pw = get_credentials(xnat_host,
                                      os.path.expanduser('~/.daxnetrc'))
headers = refresh_cookies(xnat_host, username, pw, None)
data_root = '/SAN/medic/insight46'

#Get all PET-MR sessions
url_sessions=xnat_host+'/REST/experiments?' + \
    'xsiType=xnat:petmrSessionData&project=1946'
r = requests.get(url_sessions, headers=headers)
session_list = r.json()
num_sessions = 0
sessions_to_submit = []
methods = ['spm12', 'ants', 'nifty-reg']

#For each session: check if present on central storage.
for session in sorted(session_list['ResultSet']['Result'],
                      key=lambda k: k['subject_label']):
Code Example #44
from flask import Flask, request
from flask_cors import CORS
import json

from utils import get_credentials, update_plants_json, update_devices_json, get_dates_between, get_plant_by_id
from files_maker import GrowattFileMaker, SolarEdgeFileMaker
from growatt.api import GrowattApi
from solaredge.api import SolarEdgeApi

app = Flask(__name__)
CORS(app)

credentials = get_credentials()
growatt = GrowattApi(credentials["growatt"]["username"],
                     credentials["growatt"]["password"])
solaredge = SolarEdgeApi(credentials["solaredge"]["api_token"],
                         credentials["solaredge"]["username"],
                         credentials["solaredge"]["password"])

g_maker = GrowattFileMaker()
se_maker = SolarEdgeFileMaker()


@app.route("/refresh-plants")
def refresh_plants():
    growatt_plants = growatt.get_plants()
    update_plants_json("growatt", growatt_plants)
    solaredge_plants = solaredge.get_plants()
    update_plants_json("solaredge", solaredge_plants)

    growatt_devices = growatt.get_all_devices()
Code Example #45
File: core.py  Project: zhivko95/syncmonitor
import dirtracker
import syncmonitor
import utils
import threading

user_placeholder = 'USER'

#---------------------------------------------------------
# Run order
#---------------------------------------------------------
if __name__ == '__main__':

    config = utils.get_config()
    credentials = utils.get_credentials(config)
    aws_session = utils.get_session(credentials, config)

    # Start the sync monitor in a separate daemon thread.
    sync = threading.Thread(target=syncmonitor.sync_monitor, args=(aws_session, config), daemon=True)
    sync.start()
    utils.write_to_log('Started sync monitor.')

    # Start monitoring for new files in directories using main thread.
    utils.write_to_log('Started directory trackers.')
    dirtracker.track_directories(config['folders'], aws_session, config['bucket'])
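
utils.get_credentials(config) and utils.get_session(credentials, config) are project helpers that are not shown. A rough sketch of the session side with boto3, assuming the credentials dict carries an access-key pair and the config names a region (the key names here are invented):

# Hypothetical sketch of the session helper; assumes boto3 and static keys.
import boto3

def get_session(credentials, config):
    return boto3.session.Session(
        aws_access_key_id=credentials["access_key_id"],
        aws_secret_access_key=credentials["secret_access_key"],
        region_name=config.get("region", "us-east-1"),
    )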
Code Example #46
File: app.py  Project: Ogreman/gigcalendar
import os
import logging
from utils import create_event, get_credentials, list_events, quick_create_event
from flask import Flask, request, jsonify
from apiclient.http import HttpError

FORMAT = "%(asctime)-15s: %(levelname)s: %(message)s"
logging.basicConfig(format=FORMAT, filename="gigs.log", level=logging.INFO)

app = Flask(__name__)
app.google_credentials = get_credentials()

APP_TOKENS = [
    val
    for key, val in os.environ.items()
    if key.startswith('APP_TOKEN')
]
CALENDARS = ['bristol', 'notts']

# event = {
#   'summary': 'Test',
#   'start': {
#     'date': '2015-09-14'
#   },
#   'end': {
#     'date': '2015-09-15'
#   }
# }


def add_gig(calendar, text):
Code Example #47
def test_get_credentials():
    print get_credentials('Documentcloud')
Code Example #48
def test_upload_pdf_to_documentcloud():
    print upload_pdf_to_documentcloud(TEST_PDF,
                                      get_credentials('Documentcloud'))
Code Example #49
def test():
    from utils import get_credentials
    credentials = get_credentials()
    module_selection = ModuleSelection(*credentials)
    module_selection.mainloop()
Code Example #50
def sync_ddocs(dburl):
    auth_filters = get_credentials()
    db = Database(dburl, filters=auth_filters)

    loader = FileSystemDocsLoader('_design')
    loader.sync(db, verbose=True)
Code Example #51
                    tweet_string = json.dumps(mtweet)
                    tweets.append(tweet_string)
            else:
                # pause before checking again
                print 'sleeping...'
                time.sleep(WAIT)
        if len(tweets) >= CHUNK:
            write_to_pubsub(pubsub, tweets)
            tweets = []
        count += 1
        if count % 25 == 0:
            print("processing count: %s of %s at %s" %
                  (count, count_max, datetime.datetime.now()))


if __name__ == '__main__':
    ingest_topic_info = PUBSUB_TOPIC_INGEST.split('/')
    ingest_topic_name = ingest_topic_info[-1]
    ingest_sub_name = "tweets-%s" % ingest_topic_name
    print "starting modeling...."
    credentials = utils.get_credentials()
    pubsub = utils.create_pubsub_client(credentials)
    try:
        # TODO: check if subscription exists first
        subscription = utils.create_subscription(pubsub, PROJECT_ID,
                                                 ingest_sub_name,
                                                 PUBSUB_TOPIC_INGEST)
    except Exception, e:
        print e
    model_tweets(pubsub, ingest_sub_name)
    print 'exited write loop'
Code Example #52
def test():
    from utils import get_credentials
    module_list = ["COMP2181"]
    details = get_credentials()
    create_timetable(module_list, details)
Code Example #53
File: Track.py  Project: lizzard/clouseau
                                                 params = { 'product': 'Firefox',
                                                            'signature': '=' + self.signature,
                                                            'date': ['>=' + utils.get_date_str(self.date),
                                                                     '<' + utils.get_date_str(self.date + timedelta(self.day_delta))],
                                                            'release_channel': 'nightly',
                                                            '_sort': 'build_id',
                                                            '_columns': ['uuid', 'topmost_filenames'],
                                                            '_facets': ['platform_pretty_version', 'build_id', 'version', 'release_channel', 'system_memory_use_percentage', 'addons'],
                                                            '_results_number': 100,
                                                                },
                                                 headers = header,
                                                 timeout = self.TIMEOUT,
                                                 background_callback = self.__info_cb))


#t = Track('msvcr120.dll@0xf608 | nsZipItemPtr<T>::Forget', '2016-02-25')
#t = Track('mozilla::gfx::DrawTargetCairo::FillGlyphs', '2016-02-27', day_delta = 3)
#t = Track('nss3.dll@0x1eab60 | GetFileInfo', '2016-02-28', day_delta = 2)
#t = Track('PR_DestroyThreadPrivate | PR_CleanupThread | PR_NativeRunThread | pr_root', '2016-02-26')
t = Track('mp4parse_new', '2016-02-28', credentials = utils.get_credentials('/home/calixte/credentials.json'))
#t = Track('mozilla::ipc::MessageListener::IntentionalCrash', '2016-02-27', day_delta = 3)
#t = Track('js::gc::GCRuntime::sweepBackgroundThings', '2015-12-22', day_delta = 3)
#t = Track('nsCOMPtr_base::assign_from_qi | nsCOMPtr<T>::nsCOMPtr<T> | nsDocShell::EnsureFind', '2016-02-29', day_delta = 2)
#t = Track('mozilla::layers::TextureWrapperImage::GetAsSourceSurface', '2015-12-12', day_delta = 2)
#t = Track('PLDHashTable::Remove', '2015-12-29', day_delta = 10)
#t = Track('mozilla::net::nsHttpTransaction::WriteSegments(mozilla::net::nsAHttpSegmentWriter*, unsigned int, unsigned int*)', '2015-04-13', day_delta = 1) 
#t = Track('mozilla::ipc::MessageChannel::ShouldDeferMessage', '2016-03-01', day_delta = 2)
#t = Track('mozalloc_abort | NS_DebugBreak | nsDebugImpl::Abort | XPTC__InvokebyIndex', '2016-03-01', day_delta = 2) 

pprint(t.get())