Example #1
0
def NREL_station_lookup(lat, lon, year, interval='60', utc='false'):
    """
    Returns dataframe of NREL weather observations for the given latitude, longitude, and year

    :param float lat: lat to lookup weather data on
    :param float lon: lon to lookup weather data on
    :param integer year: year to look date up in
    :param string interval: 30 or 60 intervals for weather observations (string to fit in url)
    :param string utc: TRUE/FALSE to convert observations to UTC (string to fit in url)
    :return: dataframe of NREL weather observations for that year
    :rtype: dataframe
    """
    cred = Credentials.Credentials()

    leap_year = is_leap_year(year)

    # According to site pressure could also be added but was unable to work it out
    # https://nsrdb.nrel.gov/data-sets/api-instructions.html
    # BUG FIX: a first, narrower attribute list was assigned and immediately
    # overwritten here; the dead assignment has been removed.
    attributes = 'ghi,dhi,dni,wind_speed,air_temperature,solar_zenith_angle,wind_direction,dew_point,relative_humidity'

    api_key = cred.api_key()
    your_name = cred.your_name()
    reason_for_use = cred.reason_for_use()
    your_affiliation = cred.your_affiliation()
    your_email = cred.your_email()
    mailing_list = cred.mailing_list()

    url = 'https://developer.nrel.gov/api/nsrdb/v2/solar/psm3-download.csv?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap}&interval={interval}&utc={utc}&full_name={name}&email={email}&affiliation={affiliation}&mailing_list={mailing_list}&reason={reason}&api_key={api}&attributes={attr}'.format(
        year=year,
        lat=lat,
        lon=lon,
        leap=leap_year,
        interval=interval,
        utc=utc,
        name=your_name,
        email=your_email,
        mailing_list=mailing_list,
        affiliation=your_affiliation,
        reason=reason_for_use,
        api=api_key,
        attr=attributes)

    dftemp = pd.read_csv(url, skiprows=2)
    dftemp['Wind Speed'] = dftemp[
        'Wind Speed'] * 2.236936  # Convert wind speed from m/s to mph
    dftemp['lat'] = lat
    dftemp['lon'] = lon
    # Set the time index in the pandas dataframe.
    # BUG FIX: use floor division -- 525600 / int(interval) is a float under
    # Python 3 and pandas requires an integer `periods`.
    # TODO(review): 525600 assumes a 365-day year; if the API honours
    # leap_day for leap years the index length will not match the data.
    dftemp = dftemp.set_index(
        pd.date_range('1/1/{yr}'.format(yr=year),
                      freq=interval + 'Min',
                      periods=525600 // int(interval)))
    return dftemp
Example #2
0
 def __init__(self, coms, master=None):
     """Build the pacemaker control GUI inside *master*.

     :param coms: communication interface object shared with the rest of the app
     :param master: Tk root window this frame attaches to
     """
     super().__init__(master)
     # initializing the gui window
     self.coms = coms
     self.width = 1000   # fixed window width in pixels
     self.height = 700   # fixed window height in pixels
     master.geometry("" + str(self.width) + "x" + str(self.height))
     master.title("Pacemaker Control Center")
     self.pack()
     self.master = master
     self.create_login()
     self.creds = Credentials()
Example #3
0
    def connect(self, given_password):
        # type: (Optional[str]) -> None
        """Open the Tesla API connection, by password or stored access token.

        With a password, connect directly. Without one, look up a stored
        access token in the keyring and connect with that; raise if neither
        is available.
        """
        if given_password:
            # Straightforward path: caller supplied a password.
            self.connection = teslajson.Connection(email=self.username, password=given_password)
            return

        # No password given -- fall back to a token stored in the keyring.
        creds = Credentials(self.account_key, self.username, None)
        try:
            stored_access_token = self.get_credentials(creds)
        except Exception as e:
            print(e)
            raise Exception("For user %s: Password not provided and not found in keyring" % self.username)

        self.connection = teslajson.Connection(email=self.username, access_token=stored_access_token)
Example #4
0
    def test_that_user_can_create_an_account_with_valid_data(self):
        """Register a new account with a random email and check the account page loads."""
        driver = self.driver
        driver.get(URL)
        time.sleep(5)  # crude page-load wait; explicit waits would be more robust
        register = HomePage(driver)
        register.accept_agreement()
        register.select_signup()
        time.sleep(5)
        # Random 7-char local part keeps repeated runs from colliding on the same email.
        register.enter_email_to_register(email=Credentials.random_char(7)+"@gmail.com")
        register.enter_password_to_register(password=Credentials.password)
        register.verify_password_entered(password=Credentials.password)
        register.accept_condition()
        register.register()
        time.sleep(10)
        MyAccount = MyAccountPage(driver)
        text = MyAccount.get_my_account_text()
        # 'MIJN ACCOUNT' is the Dutch account-page heading checked for success.
        if text == 'MIJN ACCOUNT':

            print("User registered successfully")

        time.sleep(10)
    def __init__(self,
                 hostname='10.128.0.7',
                 credentials='cred.yml',
                 db='bubbles',
                 port=49162):
        """Open a SQLAlchemy engine to the given Postgres instance.

        :param hostname: database host address
        :param credentials: path to a YAML file providing 'user' and 'password'
        :param db: database name
        :param port: TCP port the database listens on
        """
        self.hostname = hostname
        self.cred = Credentials(credentials).get()
        self.db = db
        self.port = port

        #Create Connection information
        # NOTE(review): SQLAlchemy >= 1.4 requires drivername 'postgresql';
        # 'postgres' only works on older versions -- confirm the pinned dependency.
        conn_url = {
            'drivername': 'postgres',
            'username': self.cred['user'],
            'password': self.cred['password'],
            'host': self.hostname,
            'port': self.port,
            'database': self.db
        }
        self.eng = create_engine(URL(**conn_url), client_encoding='utf8')
        # reflect=True loads all existing table definitions up front.
        self.meta = MetaData(bind=self.eng, reflect=True)
Example #6
0
class Imap:
    """Thin wrapper around imaplib for verifying account credentials."""

    # Shared credential/connection lookup helper (class-level, one per process).
    creds = Credentials.Credentials()

    def init(self):
        pass

    def checkAccount(self, email, pw):
        """Attempt an IMAP4-over-SSL login for *email*; return True on success.

        Host details come from connections.json via the Credentials helper.
        Returns False when no connection data exists or the login fails.
        """
        con = Imap.creds.getConnectionInfo(email)

        if (con == False):
            print(Fore.RED + "[!]\tNo connection data in connections.json!" + Style.RESET_ALL + "\n")
        else:
            print("[!]\tTesting Imap connection: " + con['host'] + " ...")

            c = None

            # print(con['host'])

            try:
                c = imaplib.IMAP4_SSL(con['host'])

                #5 second timeout
                # socket.setdefaulttimeout(5)

                c.login(email, pw)
                print(Style.BRIGHT + Fore.GREEN + "[+]\tSuccess!" + Style.RESET_ALL + "\n")
                return True
            except imaplib.IMAP4.abort as e:
                print(Style.BRIGHT + Fore.YELLOW + "[-]\tIMAP Aborted: " + str(e) + Style.RESET_ALL + "\n")
            except imaplib.IMAP4.error as ie:
                print(Style.BRIGHT + Fore.RED + "[-]\tIMAP Error: " + str(ie) + Style.RESET_ALL + "\n")
            except Exception as e:
                print(Style.BRIGHT + Fore.RED + "[-]\tFail: " + str(e) + Style.RESET_ALL + "\n")
                # BUG FIX: this was a Python 2 print statement, a SyntaxError
                # under Python 3 (the rest of this class uses print() calls).
                print(traceback.format_exc())
            finally:
                if (c is not None):
                    c.logout()

        return False
Example #7
0
    def test_that_registered_user_can_add_a_new_address(self):
        """Register a fresh account, then add an address and check the address book page."""
        driver = self.driver
        driver.get(URL)
        time.sleep(5)  # crude page-load wait; explicit waits would be more robust
        register = HomePage(driver)
        register.accept_agreement()
        register.select_signup()
        time.sleep(5)
        # Random 7-char local part avoids email collisions on repeated runs.
        email = Credentials.random_char(7)+"@gmail.com"
        password = Credentials.password
        register.enter_email_to_register(email)
        register.enter_password_to_register(password)
        register.verify_password_entered(password)
        register.accept_condition()
        register.register()
        MyAccount = MyAccountPage(driver)
        time.sleep(15)
        MyAccount.go_to_address_book()
        time.sleep(10)
        MyAccount.add_new_address()
        time.sleep(10)
        # Address fields all come from the shared Credentials test fixture.
        MyAccount.enter_first_name(fname=Credentials.fName)
        MyAccount.enter_last_name(lname=Credentials.lName)
        MyAccount.enter_street(street=Credentials.street)
        MyAccount.enter_house_number(house_number=Credentials.houseno)
        MyAccount.enter_city(city=Credentials.city)
        MyAccount.enter_zip_code(zip=Credentials.zip)
        MyAccount.save_address()
        time.sleep(10)
        #After hitting save, the My Account page loaded with given address details, in this page title = My Account and sub title is addressbook
        title = MyAccount.get_my_account_text()
        sub_title = MyAccount.get_address_as_subtitle()

        # Dutch headings: 'MIJN ACCOUNT' = My Account, 'ADRESBOEK' = address book.
        if title == 'MIJN ACCOUNT' and sub_title == 'ADRESBOEK':
            print('address added successfully by newly registered user')

        time.sleep(10)
import Credentials

# Build an authenticated YouTube API client at import time.
youtube = Credentials.run()


def add_playlist(youtube, title, description=None, privacyStatus='private'):
    """Create a YouTube playlist and return its ID, or None on failure.

    :param youtube: authenticated YouTube API service object
    :param title: playlist title
    :param description: optional playlist description
    :param privacyStatus: 'private', 'public' or 'unlisted'
    :return: the new playlist's ID, or None if the insert call failed
    """
    # BUG FIX: the description and privacyStatus arguments were previously
    # ignored -- the body hard-coded None and 'private'.
    body = dict(snippet=dict(title=title, description=description),
                status=dict(privacyStatus=privacyStatus))

    try:
        playlists_insert_response = youtube.playlists().insert(
            part='snippet,status', body=body).execute()

    except Exception:
        # Keep the original best-effort behaviour, but avoid a bare except
        # that would also swallow KeyboardInterrupt/SystemExit.
        print("CreatePlaylist Error")
        return None

    print('New playlist Name: %s' % title)
    print('New playlist ID: %s' % playlists_insert_response['id'])

    return playlists_insert_response['id']


def run(youtube, title, privacy):
    """Create one playlist per name in *title*; return the list of new playlist IDs.

    :param youtube: authenticated YouTube API service object
    :param title: list of playlist names
    :param privacy: privacy status applied to every created playlist
    :return: list of playlist IDs (None entries for failed creations)
    """
    playlistID = []

    # BUG FIX: each iteration previously overwrote playlistID with a single
    # ID (losing all but the last, despite the list initialisation above),
    # and *privacy* was passed positionally into add_playlist's description
    # parameter instead of privacyStatus.
    for name in title:
        playlistID.append(add_playlist(youtube, name, privacyStatus=privacy))

    return playlistID
Example #9
0
import Credentials
import State
import pprint
from json import loads, dumps
from watson_developer_cloud import PersonalityInsightsV3 as PIV3
from watson_developer_cloud.personality_insights_v3 import Content

# Watson Personality Insights client, authenticated with the project API key.
service = PIV3(version='2017-10-13', iam_apikey=Credentials.watsonKey(),
               url='https://gateway.watsonplatform.net/personality-insights/api')


def analisarTexto():
    """Run Watson Personality Insights on a hard-coded sample text.

    Requests raw scores in pt-br, pretty-prints the resulting profile and
    persists it via State.save().
    """
    texto = 'Just arrived in the United Kingdom. The only problem is that @CNN is the primary source of news available from the U.S. After watching it for a short while, I turned it off. All negative & so much Fake News, very bad for U.S. Big ratings drop. Why doesn’t owner @ATT do something? London part of trip is going really well. The Queen and the entire Royal family have been fantastic. The relationship with the United Kingdom is very strong. Tremendous crowds of well wishers and people that love our Country. Haven’t seen any protests yet, but I’m sure the Fake News will be working hard to find them. Great love all around.Also, big Trade Deal is possible once U.K. gets rid of the shackles. Already starting to talk!'
    #texto = State.load()

    #json content_type='application/json'


    profile = service.profile(texto, content_type='text/plain;charset=utf-8', raw_scores=True,
                              consumption_preferences=False, accept_language='pt-br').get_result()
    pprint.pprint(profile)
    State.save(profile)
Example #10
0
    def run(self):
        self.done = False
        print "Welcome! Type '/quit' to exit or '/help' for assistance."
        print "Login/sign-up below:\n"
        while True:
            tempUser = raw_input("Please enter a username: "******"InvalidUsername"] +
                                 "\n").strip()
            if tempUser == "/quit":
                self.quit()
            elif tempUser == "/help":
                print self.helpText
                continue

            tempPass = raw_input(
                "Please enter your password, if your account does not exist, you will be prompted to sign up: "
                + self.credential_errors["InvalidPassword"] + "\n").strip()
            if tempPass == "/quit":
                self.quit()
            elif tempPass == "/help":
                print self.helpText
                continue

            try:
                self.userId = self.wrapper.login(tempUser, tempPass)
                print "Login complete!"
                break
            except (invalidCredentialsException, parametersMissingException,
                    ServerWrapperException) as ex:
                if type(ex) == invalidCredentialsException:
                    print self.credential_errors["Invalid_pairing"]
                elif type(ex) == parametersMissingException:
                    print self.credential_errors["ParametersMissing"]
                else:
                    print "Error occured while trying to perform operation"

                while True:
                    response = raw_input(
                        "Press 's' to sign up as a new user with the credentials you enetered or press any key to retry login\n"
                    ).strip()
                    if response == 's':
                        print "Beginnng sign up process..."
                        try:
                            self.userId = self.wrapper.signup(
                                tempUser, tempPass)
                            print "Sign up complete, you are now logged in"
                            self.done = True
                            break
                        except (duplicateUsernameException,
                                invalidUsernameException,
                                invalidPasswordException,
                                parametersMissingException,
                                ServerWrapperException) as exx:
                            if type(ex) == duplicateUsernameException:
                                print self.credential_errors[
                                    "DuplicateUsername"]
                            elif type(ex) == invalidUsernameException:
                                print self.credential_errors["InvalidUsername"]
                            elif type(ex) == invalidPasswordException:
                                print self.credential_errors["InvalidPassword"]
                            elif type(ex) == ServerWrapperException:
                                print "Error occured while trying to perform operation"
                            else:
                                print self.credential_errors[
                                    "ParametersMissing"]
                    elif response == "/quit":
                        self.quit()
                    elif response == "/help":
                        print self.helpText
                        continue
                    else:
                        break
                if self.done:
                    break

        print self.helpText
        print "This guide can be accessed again with the /help command\n"
        self.cred = Credentials(self.userId, tempPass)
        self.chat = Chat(self.cred, self.wrapper)
        self.chat.run()
Example #11
0
def main():
    """Fetch D&B data products for each company in companies_list.csv.

    For every search row, resolves DUNS numbers, pulls a fixed set of data
    products (plus two US-only ones), and appends each JSON payload to a
    per-product file inside a timestamped folder. Processed rows are moved
    from companies_list.csv into completed_list.csv.
    """
    data_df = pd.read_csv("companies_list.csv")
    cred = CD.Credentials()
    accessToken = cred.getAccessToken()
    counter = 1
    row_count = 0
    searchCriteria = SC.SearchCriteria(accessToken)
    dataConnection = DC.DataConnection(accessToken)
    folderName = str(time.strftime("%d%m%Y_%H%M%S"))
    os.makedirs(folderName)
    print("Data is being stored in the " + folderName + " folder")
    path_list = []

    def fetch_product(product, duns):
        # Call the data-product endpoint named *product* on the connection
        # and persist the payload; track each distinct output file path.
        api_retrived_data = getattr(dataConnection, product)(duns)
        path = JFW.JsonFileWriter(folderName + "/" + product + ".txt").write(
            api_retrived_data, duns, product)
        if path is not None and path not in path_list:
            path_list.append(path)

    for index, row in data_df.iterrows():
        # Stay under the daily API call budget (each row costs up to ~71 calls).
        if counter + 71 <= 1000:
            timer = random.randrange(5, 60, 1)  # kept: preserves RNG state for re-enabled sleeps
            duns_list = searchCriteria.getDunsNumber(row["searchTerm"],
                                                     row["countryCode"])
            counter += 1
            #time.sleep(timer)

            for duns in duns_list:
                # Previously seven near-identical copy-paste stanzas; the
                # behaviour per product is identical, so loop over names.
                for product in ("AASMCU", "AASDHQ", "LNKALT", "LNKUPD",
                                "CMPELK"):
                    fetch_product(product, duns)
                    counter += 1
                    #time.sleep(timer)

                if row["countryCode"] == "US":
                    # These two products are only available for US companies.
                    for product in ("AASBIG", "LNKMIN"):
                        fetch_product(product, duns)
                        counter += 1
                        #time.sleep(timer)

            row_count = row_count + 1

    fileCloser(path_list)
    data_df[0:row_count].to_csv("completed_list.csv")
    data_df[row_count:].to_csv("companies_list.csv")
Example #12
0
    def __parseCSV(self):
        """Parse the CSV supplied via --csv and test each email/password pair.

        Prompts the operator for the email/password column layout, optionally
        routes through Tor, checks each credential pair over IMAP, and writes
        successful logins to the output file when one was requested.

        :return: True when the run completed (even if individual rows failed)
        """
        c = None
        cred = Credentials.Credentials()

        try:
            pprint(self.app.pargs)

            # if tor flag isset will attempt to connect
            if (self.app.pargs.tor):
                self.tor.connect(self.app.pargs.torport)

            lines = 0
            c = open(self.app.pargs.csv)
            ch = csv.reader(c)

            # lines = int(len(list(ch)))

            # if (self.app.pargs.verbose):
            # print("[!] Total Lines: %u" % lines)

            #default email/password columns
            emailCol = 0
            passwdCol = 1
            hasHeader = True

            # gets some basic info about file to properly parse file
            firstLineHeader = raw_input(
                "[?] Does the first row contain column titles? [Y/n]: ")

            if (firstLineHeader.upper() == 'N'):
                hasHeader = False
                lines -= 1

            emailCol = int(
                raw_input(
                    "[?] Enter column number of email login. 0 equals column A. [0]: "
                ) or emailCol)
            passwdCol = int(
                raw_input("[?] Enter column number of passwords [1]: ")
                or passwdCol)

            o = Output.Output()
            if (self.app.pargs.output is not None):
                # print("OUTPUT:" + self.app.pargs.output)
                oFile = o.createFile(self.app.pargs.output)

            for k, r in enumerate(ch):

                if (k == 0 and hasHeader == True):
                    if (self.app.pargs.verbose):
                        print("\n[!] Skipping header row")
                    continue

                email = cred.checkEmail(r[emailCol])
                pw = cred.checkPassword(r[passwdCol])

                if (email == False):
                    print("[-] Not a valid email address... skipping.")
                    continue

                if (pw == False):
                    print(Fore.RED + "[-] " + email +
                          ": Password is empty... skipping." +
                          Style.RESET_ALL + "\n")
                    continue

                print(Style.BRIGHT + "[" + str(k + 1) + "] Checking: " +
                      email + ":" + pw + Style.RESET_ALL)

                if (self.app.pargs.imap):
                    validImap = self.imap.checkAccount(email, pw)

                    if (validImap == True
                            and self.app.pargs.output is not None):
                        o.addRow(email, pw)

                    # if valid login and saving success result - will add csv row
                    if (self.app.pargs.pop):
                        print("POP not currently supported")

                # if delay is set
                # print('DELAY:' + self.app.pargs.delay)
                if float(self.app.pargs.delay or 0.5) > 0:
                    sleep(float(self.app.pargs.delay or 0.5))

        except ValueError as e:
            # BUG FIX: 'e' was never bound here ('except ValueError:'), so
            # reporting a ValueError raised a NameError instead.
            print(Style.BRIGHT + Fore.RED + "[-] Invalid input value: " +
                  str(e) + Style.RESET_ALL)
        except Exception as e:
            print(Style.BRIGHT + Fore.RED + "[-] Error parsing CSV File: " +
                  str(e) + Style.RESET_ALL)
            # BUG FIX: was a Python 2 print statement, a SyntaxError under
            # Python 3 (the rest of this method uses print() calls).
            print(traceback.format_exc())
        finally:
            # BUG FIX: guard against open() having failed, in which case c is
            # still None and c.close() would raise AttributeError.
            if c is not None:
                c.close()

            # if tor flag is set, will disconnect before exiting
            if (self.app.pargs.tor):
                baseController.tor.disconnect()

        print("\n" + Style.BRIGHT + "[*] Finished. Exiting!" +
              Style.RESET_ALL + "\n")

        return True
        # Show how many tweets have been stored
        self.tweetsStored += 1
        print('Tweets stored: ' + str(self.tweetsStored))

        # Check if time limit has been surpassed
        current_time = time.time()
        has_duration_ended = (current_time - self.start_time) > self.duration
        # Returning false will stop collection of tweets
        return not has_duration_ended

    def on_error(self, status_code):
        """Log the stream error code and stop the stream (always returns False)."""
        # BUG FIX: was a Python 2 print statement; print(x) with a single
        # argument is valid on both Python 2 and 3.
        print(status_code)
        return False

if __name__ == '__main__':

    # Twitter API keys are read from a local JSON file (kept out of version control).
    credentials_Location = "TwitterAPICredentials.json"

    auth = Credentials.getAuthentication(credentials_Location)

    output_location = 'tweets.db'
    # NOTE(review): this file uses Python 2 print statements elsewhere, and
    # under Python 2 input() *evaluates* the typed text -- a bare number works
    # but arbitrary input is eval'd; consider raw_input + int().
    streaming_duration = input('How many seconds would you like to collect Tweets for? ')
    myStreamListener = SQLDurationStreamListener(streaming_duration, output_location.strip())
    myStream = tweepy.Stream(auth, myStreamListener)
    try:
        # Collect tweets matching the track term until the duration elapses.
        myStream.filter(track=['python'])
    except KeyboardInterrupt:
        pass
    print_db_values(output_location, 'tweets')
Example #14
0
def main(topic_list, conf_matrix, save_master_data):
    """Blindset-test multiple topic skills routed through a temporary master skill.

    Combines per-topic blindsets and training data into a temporary 'master'
    workspace, runs the master blindset to decide routing, re-runs each topic
    blindset on the utterances routed to it, then exports per-topic, master
    and overall metrics. The master workspace is deleted even on failure.

    :param topic_list: comma-separated topic names (parsed via process_list_argument)
    :param conf_matrix: when truthy, also plot confusion matrices
    :param save_master_data: when truthy, export the combined master blindset
        and training CSVs to the data folder
    """
    skill_list = process_list_argument(topic_list, val_type=str)
    master_skill_id = None  # so exception works
    master_thresh = Credentials.calculate_workspace_thresh("master")

    try:
        id_dict = {
            skill: Credentials.workspace_id[active_adoption][skill]
            for skill in skill_list
        }
        timestr = generate_timestamp()  #  for use in all filenames

        # authenticate
        if "apikey" in instance_creds:
            logger.debug("Authenticating (apikey)")
            bs = blindset.blindset(
                apikey=instance_creds["apikey"],
                url=instance_creds["url"],
                version=conversation_version,
            )
        elif "password" in instance_creds:
            logger.debug("Authenticating (username/password)")
            bs = blindset.blindset(
                username=instance_creds["username"],
                password=instance_creds["password"],
                url=instance_creds["url"],
                version=conversation_version,
            )

        # check skills exist
        check_skills_exist(skill_list)

        #  import blindsets and generate master
        logger.info("Importing all blindsets and combining into master")
        blind_dict = dict()
        for skill in skill_list:
            bs_path = os.path.join(config.data_dir, f"{skill}_blindset.csv")
            blind_dict[skill] = bs.import_blindset(bs_path)

        master_blind_allcols = pd.concat(
            [v.assign(topic=k) for k, v in blind_dict.items()],
            axis=0,
            ignore_index=True,
            sort=False,
        )
        master_blind = master_blind_allcols[[
            "utterance", "topic"
        ]].rename(columns={"topic": "expected intent"})

        # generate master from topic training and push to WA
        logger.info("Getting training data from WA")
        train_dict = dict()
        for skill in skill_list:
            train_dict[skill] = wa_utils.get_training_data(
                bs.assistant, id_dict[skill])

        logger.info("Creating temporary master skill")
        master_train = pd.concat(
            [
                v.drop(columns=["intent"]).assign(intent=k)
                for k, v in train_dict.items()
            ],
            axis=0,
            ignore_index=True,
            sort=False,
        )
        master_skill_id = wa_utils.create_workspace_from_df(
            bs.assistant,
            name="master",
            train_df=master_train,
            description="generated by intent_training_tools",
        )

        # run blindset on master
        logger.info("Running blindset on master..")
        results_master = bs.run_blind_test(master_blind,
                                           master_skill_id,
                                           threshold=master_thresh)
        results_master["routing"] = results_master["intent1"]
        results_master.loc[results_master["confidence1"] < master_thresh,
                           "routing"] = "anything_else"

        # create blindsets for topics based on master results
        newblind_dict = dict()
        for skill in skill_list:
            # blindset for each skill is made up of utterances that have landed in that skill for master
            blind_utterances = results_master.loc[
                (results_master["intent1"] == skill)
                & (results_master["confidence1"] >= master_thresh),
                "original_text", ].tolist()
            newblind = master_blind_allcols[master_blind_allcols["utterance"].
                                            isin(blind_utterances)].copy()
            newblind.loc[newblind["topic"] != skill,
                         "expected intent"] = "anything_else"
            newblind_dict[skill] = newblind[["utterance", "expected intent"
                                             ]].reset_index(drop=True)

        # run blindsets on topics
        logger.info("Running blindset on topic skills..")
        results_dict = dict()
        for skill in skill_list:
            results_dict[skill] = bs.run_blind_test(
                newblind_dict[skill],
                id_dict[skill],
                threshold=Credentials.calculate_workspace_thresh(skill),
            )

        #  plot confusion matrices
        if conf_matrix:
            from conversation_test.confusionmatrix import ConfusionMatrix

            conf_output_path = lambda s: os.path.join(
                config.output_folder, f"{s}_multi_confmat_{timestr}.png")

            # master
            cfn = ConfusionMatrix(workspace_thresh=master_thresh)
            cfn.create(results_master, fig_path=conf_output_path("master"))

            #  topics
            for skill in skill_list:
                cfn = ConfusionMatrix(workspace_thresh=Credentials.
                                      calculate_workspace_thresh(skill))
                cfn.create(results_dict[skill],
                           fig_path=conf_output_path(skill))

            logger.info("Confusion matrix saved to results folder")

        # calculate metrics
        # master
        met = Metrics(workspace_thresh=master_thresh)
        metrics_master, _ = met.get_all_metrics(results_master,
                                                detailed_results=True)

        # topics
        metrics_dict = dict()
        res_with_conf_dict = dict()
        for skill in skill_list:
            met = Metrics(
                workspace_thresh=Credentials.calculate_workspace_thresh(skill))
            metrics_dict[skill], res_with_conf_dict[
                skill] = met.get_all_metrics(results_dict[skill],
                                             detailed_results=True)

        # topics - create overall view as if it's a single skill
        topics_res_with_conf = pd.concat(
            [v for k, v in res_with_conf_dict.items()],
            ignore_index=True,
            sort=False)

        # Utterances the master routed to anything_else count as false negatives.
        results_master.loc[results_master["routing"] == "anything_else",
                           'confusion'] = 'FN'

        topics_res_with_conf = topics_res_with_conf.append(
            results_master,
            ignore_index=True,
            sort=False,
        )
        # NOTE(review): 'met' here is the Metrics instance from the *last*
        # skill in the loop above, so that skill's threshold applies to the
        # overall view -- confirm this is intended.
        metrics_overall = met.calculate_metrics_per_intent(
            topics_res_with_conf, detailed_results=True)

        metrics_overall.loc[metrics_overall.index.isin(skill_list),
                            'threshold'] = master_thresh
        metrics_overall = metrics_overall.rename(
            index={s: s + ' - anything else'
                   for s in skill_list})

        # export results
        for skill in skill_list:
            results_dict[skill].to_csv(
                os.path.join(config.output_folder,
                             f"{skill}_multi_results_{timestr}.csv"),
                index=None,
            )
            metrics_dict[skill].to_csv(
                os.path.join(config.output_folder,
                             f"{skill}_multi_metrics_{timestr}.csv"))

        results_master.to_csv(
            os.path.join(config.output_folder,
                         f"master_multi_results_{timestr}.csv"),
            index=None,
        )
        metrics_master.to_csv(
            os.path.join(config.output_folder,
                         f"master_multi_metrics_{timestr}.csv"))
        metrics_overall.to_csv(
            os.path.join(config.output_folder,
                         f"overall_multi_metrics_{timestr}.csv"))
        logger.info("Results and metrics saved to output folder")

        if save_master_data:
            # export master blindset with both intent and topic labels to CSV
            master_blind_allcols.to_csv(
                os.path.join(config.data_dir,
                             f"master_blindset_{timestr}.csv"),
                index=None,
            )

            # export master training to CSV
            master_train.to_csv(
                os.path.join(config.data_dir,
                             f"master_training_{timestr}.csv"),
                header=None,
                index=None,
            )

            logger.info(
                "Master blindset and training have also been saved to the data folder"
            )

        #  delete master skill
        logger.info("Deleting temporary master skill")
        wa_utils.delete_workspace(bs.assistant, master_skill_id)

    except Exception as e:
        if master_skill_id is not None:
            # make sure master deleted anyway
            logger.info("Deleting temporary master skill before exit")
            wa_utils.delete_workspace(bs.assistant, master_skill_id)

        raise e
Example #15
0
 def __init__(self, params):
     """Store config *params* and build credentials from params["credentials"]."""
     self.params = params
     self.credentials = Credentials.Credentials(params["credentials"])
Example #16
0
        NBClassifier, bestCount_Vectorizer, best_tfidf, interestLabels = train(
            trainingSet)
        print()
        print("Initiating Testing...")
        print()
        testClassifier(NBClassifier, testSet, bestCount_Vectorizer)
        printMetrics(NBClassifier, best_tfidf, interestLabels, "NBClassifier")
        print()
        print("Starting Live tweets")
        print("Press Control+C to exit...")

    def on_status(self, status):
        """Classify live tweets that mention any of the target keywords.

        Relies on names set up at module level before streaming starts
        (targetWords, NBClassifier, bestCount_Vectorizer, label, preprocess).
        """
        tweet = str(status.text).lower()
        if any(i in tweet for i in targetWords):
            preprocessedtweet = preprocess(tweet)
            predictedlabel = predictInterest([preprocessedtweet], NBClassifier,
                                             bestCount_Vectorizer)
            print(tweet)
            print(label[predictedlabel[0]])

    def on_error(self, status_code):
        """Disconnect the stream when Twitter rate-limits (HTTP 420)."""
        # Returning False tells tweepy to stop; None (any other code) lets it retry.
        return False if status_code == 420 else None


# Authenticate with Twitter and start streaming live English tweets.
auth = Credentials.authenticate()
twitterStream = Stream(auth, listener())
# Bounding box (lon/lat pairs): roughly the Dallas-Fort Worth metro area.
twitterStream.filter(locations=[-97.318268, 32.760717, -96.600723, 33.207095],
                     languages=["en"],
                     stall_warnings=True)
import spotipy.util as util
import spotipy
import time
import Credentials

# Fetch the currently-playing Spotify track and print a summary (labels in Portuguese).
scope = "user-read-currently-playing"
Credentials.SetCreditentials()

token = util.prompt_for_user_token(username='', scope=scope)
track = spotipy.Spotify(token).current_user_playing_track()  # JSON payload as a dict

item = track['item']
nameTrack = item['name']
artistsTrack = item['artists'][0]['name']
albumTrack = item['album']['name']
progressTrack = track['progress_ms'] / 1000   # seconds played so far
durationTrack = item['duration_ms'] / 1000    # total length in seconds
statusTrack = track['is_playing']

print(f"Nome: {nameTrack}")
print(f"Artista: {artistsTrack}")
print(f"Album: {albumTrack}")
print(f"Porcentagem já ouvida: {int((progressTrack/durationTrack)*100)}%")
Example #18
0
def run_kfold(topic, no_folds, results_type, conf_matrix):
    """
    Runs kfold test using credentials in ../Credentials.py

    :param topic: workspace topic; selects workspace id/threshold in Credentials
    :param no_folds: number of folds for the k-fold test
    :param results_type: 'raw', 'metrics' or 'all' -- which outputs to export
    :param conf_matrix: if truthy, also render a confusion-matrix PNG
    :raises ValueError: if the credentials contain neither 'apikey' nor 'password'
    """

    # get credentials, import + export folders
    import Credentials
    active_adoption = Credentials.active_adoption
    instance_creds = Credentials.ctx[active_adoption]
    workspace_id = Credentials.workspace_id[active_adoption][topic]
    workspace_thresh = Credentials.calculate_workspace_thresh(topic)
    conversation_version = Credentials.conversation_version

    # export folder + timestamp used in all output filenames
    import config
    import time
    export_folder = config.output_folder
    timestr = time.strftime("%Y%m%d-%H%M")

    output_loc_results = os.path.join(
        export_folder, "{}_kfold_results_raw_{}.csv".format(topic, timestr))
    output_loc_metrics = os.path.join(
        export_folder, "{}_kfold_results_metrics_{}.csv".format(topic, timestr))
    output_loc_confmat = os.path.join(
        export_folder, "{}_kfold_confmat_{}.png".format(topic, timestr))

    # authenticate -- fail fast when neither auth style is present, instead of
    # hitting a NameError on `kf` further down
    if 'apikey' in instance_creds:
        logger.debug("Authenticating (apikey)")
        kf = kfoldtest(n_folds=no_folds, apikey=instance_creds['apikey'],
                       url=instance_creds['url'], threshold=workspace_thresh,
                       version=conversation_version)
    elif 'password' in instance_creds:
        logger.debug("Authenticating (username/password)")
        kf = kfoldtest(n_folds=no_folds, username=instance_creds['username'],
                       password=instance_creds['password'],
                       url=instance_creds['url'], threshold=workspace_thresh,
                       version=conversation_version)
    else:
        raise ValueError(
            "No 'apikey' or 'password' in credentials for adoption "
            "'{}'".format(active_adoption))

    # get train df from watson + check there are sufficient workspaces to run the test
    train_df = kf.intent_df_from_watson(workspace_id)
    kf.check_sufficient_workspaces()

    # create folds in WA if above is true
    folds = kf.create_folds(method='kfold')
    kf.create_kfold_WA(folds)

    # poll until the temporary workspaces have finished training
    available_flag = False
    while not available_flag:
        logger.info("Checking workspaces..")
        available_flag = kf.check_workspaces_status()
        time.sleep(20)

    # run kfold test
    try:
        results = kf.run_kfold_test(folds)

        if results_type in ('raw', 'all'):
            results.to_csv(output_loc_results)

        classification_report = kf.create_classification_report(results)

        if results_type in ('metrics', 'all'):
            metrics = Metrics(workspace_thresh)
            metric_df = metrics.get_all_metrics_CV(
                results, fold_col='fold', detailed_results=False)
            metric_df.to_csv(output_loc_metrics)

        # TODO: confusion matrix
        if conf_matrix:
            from confusionmatrix import ConfusionMatrix
            cfn = ConfusionMatrix(workspace_thresh=workspace_thresh)
            cfn.create(results, fig_path=output_loc_confmat)
            logger.info("Confusion matrix saved to {}".format(
                output_loc_confmat))

    finally:
        # regardless of what happens above, delete the temporary workspaces before exiting
        kf.delete_kfold_workspaces()
Example #19
0
import Subnet
import NIC
import VirtualMachine

# Required Variables -- names for the Azure resources created below
GROUP_NAME = 'Python-Sachin'  # resource group
LOCATION = 'southeastasia'  # Azure region for every resource
AVLSET_NAME = 'DemoAVSet'  # availability set
PIP_NAME = 'DemoPIP'  # public IP address
VNET_NAME = 'DemoVNET'  # virtual network
SUBNET_NAME = 'DemoSubnet'  # subnet name (presumably used by the Subnet helper)
NIC_NAME = 'DemoNIC'  # NIC name (presumably used by the NIC helper)
VM_NAME = 'DemoVM'  # VM name (presumably used by the VirtualMachine helper)

if __name__ == "__main__":
    credentials = Credentials.get_credentials()
    rg_client = ResourceManagementClient(credentials,
                                         Credentials.SUBSCRIPTION_ID)
    network_client = NetworkManagementClient(credentials,
                                             Credentials.SUBSCRIPTION_ID)
    compute_client = ComputeManagementClient(credentials,
                                             Credentials.SUBSCRIPTION_ID)
    print(ResourceGroup.create_resource_group(rg_client, GROUP_NAME, LOCATION))
    print(
        AvailabilitySet.create_availability_set(compute_client, GROUP_NAME,
                                                LOCATION, AVLSET_NAME))
    print(
        PIP.create_public_ip_address(network_client, GROUP_NAME, LOCATION,
                                     PIP_NAME))
    print(
        VirtualNetwork.create_virtual_network(network_client, GROUP_NAME,
Example #20
0
		noResults = 10;

	return service.events().list(calendarId = calId,
		timeMin = timeNow,
		maxResults = noResults,
		singleEvents = True,
		q = queryText,
		orderBy = 'startTime').execute()
	
def fetch_events():
	"""Query every 'seminars' calendar for 'Test' events and print each
	event's start time and summary (Python 2 script)."""

	calIds = calendar.getList('seminars');
	events = [];
	for item in calIds:
		print "For calendarId: ", item;
		q = generate_query(calId = item, queryText='Test');
		events.extend(q.get('items', []));

	# nothing matched the query on any calendar
	if not events:
		print 'No upcoming events found.';
	for event in events:
		# all-day events carry 'date' instead of 'dateTime'
		start = event['start'].get('dateTime', event['start'].get('date'));
		print start, event['summary'];	

# Build the calendar helper, authorise a Google Calendar v3 service,
# then print the upcoming events (Python 2 script).
calendar = Calendar.Calendar();
credentials = Credentials.get_credentials();
service = build('calendar', 'v3', http=credentials.authorize(Http()))
print 'Getting the upcoming 10 events'
fetch_events()

    
Example #21
0
    def startfederation(self):
        """Validate the UI fields, build per-server credentials and start the
        federation worker threads; when TLS is enabled, also exchange certs."""
        # logging.info('Starting')
        if self.validateFields() == 1:
            self.pb_start.setText('Working')
            logging.info('Validating Fields')
            #logging
            #textBoxUpdater = updatetextbox.UpdatetextBox(self.te_log)

            server1creds = ''
            server2creds = ''
            # Server 1 credentials come from the form only when cb_cup is ticked.
            if self.cb_cup.isChecked():
                logging.debug('Found server 1 credentials')
                server1creds = Credentials.Credentials(self.le_appid1.text(),
                                                       self.le_apppw1.text(),
                                                       self.le_platid1.text(),
                                                       self.le_platpw1.text())
                plat_tuple1 = server1creds._platid, server1creds._platpw

            if self.cb_copy.isChecked():
                # cb_copy ticked: reuse server 1's credentials for server 2.
                logging.debug('Found server 2 credentials')
                server2creds = server1creds
            else:
                if self.cb_cup.isChecked():
                    # NOTE(review): despite the log message, this reads the
                    # *server 2* form fields; the cb_cup guard also looks odd
                    # for a server-2 branch -- confirm intent.
                    logging.debug('Copying Server 2 credentials from server 1')
                    server2creds = Credentials.Credentials(
                        self.le_appid2.text(), self.le_apppw2.text(),
                        self.le_platid2.text(), self.le_platpw2.text())
                    plat_tuple2 = server2creds._platid, server2creds._platpw
            #self.log('server1creds')
            #self.log(server1creds._appid)
            #self.log(str(server1creds._apppw))
            # self.log('startfederation')
            # Collect host names, IPs and domain names for both servers.
            hn1 = self.le_server1_hn.text()
            ip1 = self.le_server1_ip.text()
            dn1 = self.le_server1_dn.text()
            hn2 = self.le_server2_hn.text()
            ip2 = self.le_server2_ip.text()
            dn2 = self.le_server2_dn.text()
            dnsip = self.le_dns.text()
            dbsecret = self.le_dialback.text(
            )  #needed since dbsecret is optional
            if not dbsecret:
                dbsecret = '1234'
                logging.info("Using default dial back secret")
            self.advancedTab = advancedTab.AdvancedTab(
                dbsecret, self.cb_certs.isChecked(), self.cb_sasl1.isChecked(),
                self.cb_sasl2.isChecked())
            tlsoption = self.cb_tls.currentIndex()
            worker1 = FedController.WorkerThread(ip1, tlsoption, 'r', hn1, dn1,
                                                 dnsip, server1creds,
                                                 self.advancedTab)
            worker2 = FedController.WorkerThread(ip2, tlsoption, 'r', hn2, dn2,
                                                 dnsip, server2creds,
                                                 self.advancedTab)
            # TLS combo index 1 or 2 means certificates must be exchanged first.
            if self.cb_tls.currentIndex() == 1 or self.cb_tls.currentIndex(
            ) == 2:
                logging.info('inside if block tls enabled')
                worker1.downloadCerts()
                worker2.downloadCerts()
                dnldir1 = worker1.downloaddir
                dnldir2 = worker2.downloaddir
                logging.info("Server 1 certs downloaded to " + str(dnldir1))
                logging.info('Server 2 certs downloaded to ' + str(dnldir2))

                # NOTE(review): plat_tuple1/plat_tuple2 are bound only inside
                # the credential branches above; with cb_cup unticked (or
                # cb_copy ticked, for plat_tuple2) this raises NameError --
                # TODO confirm/guard.
                uploader1 = uploadCerts.UploadCerts(ip1, 'xmpp', plat_tuple1,
                                                    dnldir2)
                uploader2 = uploadCerts.UploadCerts(ip2, 'xmpp', plat_tuple2,
                                                    dnldir1)

        else:
            pass
Example #22
0
class LazyLinkedIn(object):
    """Selenium bot that logs into LinkedIn, searches jobs, applies filters and
    auto-submits 'Easy Apply' style applications.

    NOTE(review): the attributes below run at class-definition time, so merely
    importing this module launches Chrome with a hard-coded driver path.
    """
    driver = webdriver.Chrome(executable_path="C:\\Users\\Stratos\\Desktop\\LazyLinkedIn\\venv\\chromedriver.exe")
    driver.implicitly_wait(4)
    driver.maximize_window()
    user = Credentials.User();

    def login(self):
        """Log into LinkedIn with the stored username/password; returns self."""
        self.driver.get("https://www.linkedin.com")
        usernameField = self.driver.find_element(By.XPATH, '//*[@id = "login-email"]')
        passwordField = self.driver.find_element(By.XPATH, '//*[@id = "login-password"]')
        usernameField.send_keys(self.user.username)
        passwordField.send_keys(self.user.password)
        passwordField.send_keys(Keys.RETURN)
        return self
    def jobs(self):
        """Open the Jobs tab and search for the configured job/location; returns self."""
        jobsButton = self.driver.find_element_by_id("jobs-tab-icon")
        jobsButton.click()
        jobName=self.driver.find_element(By.XPATH,"//input[contains(@id,'jobs-search-box-keyword-id')]")
        jobLocation=self.driver.find_element(By.XPATH,"//input[contains(@id,'jobs-search-box-location')]")
        time.sleep(1.2)
        jobName.send_keys(self.user.job) # TODO: replace raw keystrokes with a more robust input method
        jobLocation.send_keys(self.user.location)
        time.sleep(2)
        jobLocation.send_keys(Keys.RETURN)
        return self

    def filters(self):
        """Apply the 'LinkedIn Features' and 'Experience' facets from the user config; returns self."""
        self.driver.find_element(By.XPATH,"//h3[contains(.,'LinkedIn Features')]").click()
        self.driver.find_element(By.XPATH,"//label[contains(.,'"+self.user.linkedInFeature+"')]").click()
        self.driver.find_element(By.XPATH,"//*[@id='linkedin-features-facet-values']//button[contains(.,'Apply')]").click()
        self.driver.find_element(By.XPATH, "//h3[contains(.,'Experience')]").click()
        self.driver.find_element(By.XPATH, "//label[contains(.,'" + self.user.experience+"')]").click()
        self.driver.find_element(By.XPATH,"//*[@id='experience-level-facet-values']//button[contains(.,'Apply')]").click()
        return self

    def getJobList(self):
        """Return [results-count text, list of job <li> elements] for the current page."""
        totalJobs=self.driver.find_element(By.XPATH,"//div[contains(.,'results') and contains(@class,'t-12')]").get_attribute("innerText")
        return [totalJobs, self.driver.find_elements(By.XPATH,"//ul[contains(@class,'jobs-search-results__list')]/li")]


    def parseThroughList(self,current=1):
        """Walk the job list, clicking each entry and attempting an application.

        NOTE(review): pages after the 25th item and after each new-tab
        application by recursive calls -- deep result sets could hit the
        recursion limit; bare excepts hide any selenium failure.
        """
        self.currentPage=current
        i = 0
        #currentPage=1
        time.sleep(2)
        totalJobs= self.getJobList()[0]
        list = self.getJobList()[1]
        for x in list:
            i = i + 1
            print("vlepw doulia noumero:",i,"selida:",self.currentPage)
            if(i==25):
                # end of a page: move to the next page and recurse into it
                self.currentPage+=1
                print("paw selida:", self.currentPage)
                self.driver.find_element(By.XPATH,"//button[contains(@aria-label,'Page "+str(self.currentPage)+"')]").click()
                self.parseThroughList(self.currentPage)
            try:
                x.click()
            except:
                pass
            try:
                # skip jobs with no top-card apply button
                self.driver.find_element(By.XPATH,"//button[@class='jobs-apply-button--top-card artdeco-button--3 artdeco-button--primary jobs-apply-button artdeco-button ember-view']").click()
                time.sleep(1)
            except:
                continue
            try:
                self.currentTabApplication()
            except:
                self.newTabApplication()


    def currentTabApplication(self):
        """Submit an in-page application form and dismiss the confirmation."""
        self.driver.find_element(By.XPATH,"//button[@class='jobs-apply-form__submit-button button-primary-large ']").click()
        time.sleep(2)
        self.driver.find_element(By.XPATH, "//button[@class='artdeco-dismiss']").click()

    def newTabApplication(self):
        """Handle an application that opened in a second tab, then resume parsing.

        NOTE(review): switch_to_window is deprecated in modern selenium
        (switch_to.window) -- confirm the pinned selenium version supports it.
        """
        self.driver.switch_to_window(self.driver.window_handles[1])
        time.sleep(2)
        self.driver.find_element(By.XPATH, "//button[contains(.,'Submit')]").click()
        time.sleep(2)
        self.driver.close()
        time.sleep(2)
        self.driver.switch_to_window(self.driver.window_handles[0])
        time.sleep(3)
        self.parseThroughList(self.currentPage)
Example #23
0
import Credentials
from twilio import twiml
from twilio.rest import Client
from twilio.twiml.messaging_response import MessagingResponse
from flask import Flask, request, redirect
import openpyxl
from openpyxl import Workbook

# Twilio auth values and the spreadsheet location come from the local Credentials module.
account_sid = Credentials.get_sid()
auth_token = Credentials.get_auth()
path = Credentials.path  # path of the Excel workbook updated on each incoming SMS

app = Flask(__name__)


def update_xl(message_body):
    """Append *message_body* as a new row in column A of the workbook at ``path``.

    Loads the workbook, writes one cell directly below the last populated row
    of the active sheet, and saves the file back in place.

    :param message_body: text to record (stored via str())
    """
    wb = openpyxl.load_workbook(path)
    sheet = wb.active

    # write into column A, one row below the current last row
    next_row = sheet.max_row + 1
    sheet.cell(column=1, row=next_row, value=str(message_body))

    wb.save(filename=path)


@app.route("/sms", methods=['GET', 'POST'])
def sms_reply():

    #Retrieve actual message
    message_body = request.form['Body']
Example #24
0
# -*- coding: utf-8 -*-
"""
Éditeur de Spyder

Ceci est un script temporaire.
"""
import time
import hashlib
from Credentials import *
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

# Load the stored login/password from the Credentials helper.
cred = Credentials()
user_login = cred.get_login()
user_password = cred.get_password()

# NOTE(review): prints the login to stdout -- looks like leftover debugging.
print(user_login)

# chromedriver lives on a network share; the path is hard-coded.
browser = webdriver.Chrome(
    executable_path=
    "//10.12.100.48/HomeEtu/etudiants/vbrother/Desktop/BOTS/chromedriver_win32/chromedriver"
)
# The triple-quoted string below is disabled CAS-login code kept for reference.
"""browser.get("https://cas.upf.pf/login?service=https%3A%2F%2Fent.upf.pf%2Findex.php%3Fauth")

username = browser.find_element_by_id("username").send_keys(user_login)
password = browser.find_element_by_id("password").send_keys(user_password + Keys.ENTER)
"""
# Open Google in a second tab and switch focus to it.
browser.execute_script('''window.open("http://google.com","_blank");''')
browser.switch_to.window(browser.window_handles[1])

#
Example #25
0
def run_blindset(topic, results_type, conf_matrix, blindset_name):
    """
    Runs blindset test using credentials in ../Credentials.py

    :param topic: workspace topic; selects workspace id/threshold in Credentials
    :param results_type: 'raw', 'metrics' or 'all' -- which outputs to export
    :param conf_matrix: if truthy, also save a confusion-matrix PNG
    :param blindset_name: CSV in the data folder; defaults to '<topic>_blindset.csv'
    """

    # get credentials, import + export folders
    import Credentials
    active_adoption = Credentials.active_adoption
    instance_creds = Credentials.ctx[active_adoption]
    # NOTE(review): these prints leak instance credentials to stdout and look
    # like leftover debugging -- consider removing.
    print(instance_creds)
    print('print works')

    workspace_id = Credentials.workspace_id[active_adoption][topic]
    workspace_thresh = Credentials.calculate_workspace_thresh(topic)
    conversation_version = Credentials.conversation_version

    # import + export folders
    import config
    import time
    data_folder = config.data_dir
    export_folder = config.output_folder
    timestr = time.strftime("%Y%m%d-%H%M")  # timestamp used in all output names

    blindset_name = blindset_name or topic + "_blindset.csv"
    output_loc_results = os.path.join(
        export_folder, "{}_results_raw_{}.csv".format(topic, timestr))
    output_loc_metrics = os.path.join(
        export_folder, "{}_results_metrics_{}.csv".format(topic, timestr))
    output_loc_confmat = os.path.join(
        export_folder, "{}_confmat_{}.png".format(topic, timestr))

    # authenticate
    # NOTE(review): if instance_creds has neither 'apikey' nor 'password',
    # `bs` is never bound and the code below raises NameError -- an explicit
    # else/raise would fail faster with a clearer message.
    if 'apikey' in instance_creds:
        logger.debug("Authenticating (apikey)")
        bs = blindset(apikey=instance_creds['apikey'],
                      url=instance_creds['url'],
                      threshold=workspace_thresh,
                      version=conversation_version)
    elif 'password' in instance_creds:
        logger.debug("Authenticating (username/password)")
        bs = blindset(username=instance_creds['username'],
                      password=instance_creds['password'],
                      url=instance_creds['url'],
                      threshold=workspace_thresh,
                      version=conversation_version)

    # run test
    blindset_df = bs.import_blindset(os.path.join(data_folder, blindset_name))
    # TODO: check blindset df
    results = bs.run_blind_test(blindset_df, workspace_id)

    # exports + metrics
    if (results_type == 'raw') or (results_type == 'all'):
        # drop the helper column before exporting raw results
        cols_export = [
            col for col in results.columns.values if col != 'intent_correct'
        ]
        results[cols_export].to_csv(output_loc_results, encoding='utf-8')
        logger.info("Raw results exported to {}".format(output_loc_results))

    if (results_type == 'metrics') or (results_type == 'all'):
        met = Metrics(workspace_thresh)
        metric_df, _ = met.get_all_metrics(results, detailed_results=True)

        metric_df.to_csv(output_loc_metrics, encoding='utf-8')
        logger.info(
            "Metrics per intent exported to {}".format(output_loc_metrics))

    # confusion matrix
    if conf_matrix:
        from confusionmatrix import ConfusionMatrix
        cfn = ConfusionMatrix(workspace_thresh=workspace_thresh)
        cfn.create(results, fig_path=output_loc_confmat)
        #bs.plot_confusion_matrix(results, output_loc_confmat)
        logger.info("Confusion matrix saved to {}".format(output_loc_confmat))

    # print high-level metrics
    overall_metrics = bs.calculate_overall_metrics(results,
                                                   av_method="weighted")
    logger.info("Overall metrics for the workspace (weighted):")
    logger.info(overall_metrics)