Example #1
    def test_search_pokemon_by_number(self):
        #pokemon = requests.get('https://pokeapi.co/api/v2/pokemon/1')
        my_data = GetData()
        pokemon = my_data.get('1')

        # The line above is commented out; now we retrieve the expected data for the pokemon under test
        data = my_data.get_pokemon_data('bulbasaur')

        my_pokemon = pokemon.json()

        self.assertEqual(200, pokemon.status_code)
        self.assertEqual(my_pokemon['name'], data['name'])
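
The GetData helper these tests call is not shown in the examples. Below is a minimal sketch of what they assume: the base URL comes from the commented-out requests calls, and the fixture-file convention (one <name>.json per pokemon) is an assumption.

import json
import requests

class GetData:
    BASE_URL = 'https://pokeapi.co/api/v2/pokemon/'

    def get(self, endpoint):
        # GET the pokemon by name or number; returns the raw requests.Response
        return requests.get(self.BASE_URL + endpoint)

    def get_pokemon_data(self, name):
        # Load the expected data for a pokemon from a local JSON fixture (assumed layout)
        with open(name + '.json') as f:
            return json.load(f)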
Example #2
    def test_search_valid_pokemon(self):
        # GET runs against a URL; we use an endpoint to retrieve its information
        #pokemon = requests.get('https://pokeapi.co/api/v2/pokemon/pikachu')
        # Switched to the kakuna endpoint

        # Call the method that holds our base URL, passing as a parameter
        # the pokemon whose endpoint we want, to fetch its information.
        # Create the GetData object
        my_data = GetData()
        pokemon = my_data.get('kakuna')
        my_pokemon = pokemon.json()

        #pokemon = requests.get('https://pokeapi.co/api/v2/pokemon/kakuna')
        #my_pokemon = pokemon.json()

        # Check the status code
        #print(pokemon.status_code)
        # Returns the endpoint's information (pokemon/kakuna) in JSON format
        #print(my_pokemon)
        # Print only the value of the weight property
        #print(my_pokemon['weight'])
        # Retrieve the moves of the pokemon kakuna (it has 5)
        #print(my_pokemon['moves'])
        # Check the move at index 4
        #print(my_pokemon['moves'][4])

        # Load the data stored in the kakuna.json file,
        # so the moves list shown below is no longer needed.
        # This requires importing the json library
        #with open('kakuna.json') as kakuna:
        #    data = json.load(kakuna)

        # The block above is commented out; now we retrieve the expected data for the pokemon under test
        data = my_data.get_pokemon_data('kakuna')

        # The 5 moves of kakuna, previously kept in a local list
        #moves = ['string-shot', 'harden', 'iron-defense', 'bug-bite', 'electroweb']

        # Verify that the request returned status code 200, i.e. OK
        self.assertEqual(200, pokemon.status_code)
        # For pikachu we would check that weight is 60;
        # for kakuna, the weight property should be 100
        #self.assertEqual(100, my_pokemon['weight'])
        self.assertEqual(data['weight'], my_pokemon['weight'])

        # Iterate over all the move elements
        for i in my_pokemon['moves']:
            # The array can now be traversed, because i is an element yielded by the iteration
            print(i['move']['name'])
            # Verify that every element visited in my_pokemon['moves']
            # is also present in the expected moves list
            #self.assertTrue(i['move']['name'] in moves)
            self.assertTrue(i['move']['name'] in data['moves'])
Example #3
def main(K, alpha=0.5, iterations=300):

    # get data
    datagetter = GetData("data/Amazon.mat")
    A, X, gnd = datagetter.readFile()
    Anorm = normA.noramlization(A)
    A = torch.tensor(A, dtype=torch.float32)
    X = torch.tensor(X, dtype=torch.float32)
    gnd = torch.tensor(gnd, dtype=torch.float32)
    Anorm = torch.tensor(Anorm, dtype=torch.float32)
    samples = datagetter.returnSamples()
    attributes = datagetter.returnAttributes()

    # model
    if torch.cuda.is_available():
        A = A.cuda()
        X = X.cuda()
        gnd = gnd.cuda()
        Anorm = Anorm.cuda()
        dominant = Dominant.DominantModel(Anorm, attributes)
        dominant = dominant.cuda()
    else:
        dominant = Dominant.DominantModel(Anorm, attributes)

    gd = torch.optim.Adam(dominant.parameters(), lr=0.005)
    # print(dominant)

    # training
    for _ in range(iterations):
        reconstructionStructure, reconstructionAttribute = dominant(X)
        loss = alpha * torch.norm(reconstructionStructure - A) + (
            1 - alpha) * torch.norm(reconstructionAttribute - X)

        gd.zero_grad()
        loss.backward()
        gd.step()

    # get score
    if torch.cuda.is_available():
        structureError = (reconstructionStructure - A).cpu().detach().numpy()
        attributeError = (reconstructionAttribute - X).cpu().detach().numpy()
    else:
        structureError = (reconstructionStructure - A).detach().numpy()
        attributeError = (reconstructionAttribute - X).detach().numpy()
    structureLoss = np.linalg.norm(structureError, axis=1, keepdims=True)
    attributeLoss = np.linalg.norm(attributeError, axis=1, keepdims=True)
    score = alpha * structureLoss + (1 - alpha) * attributeLoss
    RecallatK = calculateRecallAtK(score, gnd, K)
    PrecisionatK = calculatePrecisionAtK(score, gnd, K)
    print("Recall @ {}: \t{}".format(K, RecallatK))
    print("Recall @ {}: \t{}".format(K, PrecisionatK))
    print("AUC value: \t{}".format(calculateAUC.getAUC(score=score, gnd=gnd)))
Example #4
 def test_search_valid_pokemon(self):
     my_data = GetData()
     #pokemon = requests.get('https://pokeapi.co/api/v2/pokemon/kakuna')
     pokemon = my_data.get('kakuna')
     my_pokemon = pokemon.json()
     with open('kakuna.json') as kakuna:
         data = json.load(kakuna)
     #moves = ['string-shot', 'harden', 'iron-defense', 'bug-bite', 'electroweb']
     self.assertEqual(200, pokemon.status_code)
     self.assertEqual(data['weight'], my_pokemon['weight'])
     for i in my_pokemon['moves']:
         print(i['move']['name'])
         self.assertTrue(i['move']['name'] in data['moves'])
Example #5
def startWriteData(Cookie):
    global rowIndex

    # Write the header row first
    WriteData().writeFirst()

    # For each coordinate pair, page through the shop list until it is exhausted
    for latitudeAndlongitude in latitudeAndlongitudeArr:
        print(latitudeAndlongitude)
        pageIndex = 0
        shopList = GetData(Cookie).getShopList(pageIndex, latitudeAndlongitude)
        while shopList is not None and len(shopList) > 0:
            WriteData().write_excel(shopList, rowIndex, latitudeAndlongitude)
            pageIndex += 1
            rowIndex += 1
            shopList = GetData(Cookie).getShopList(pageIndex,
                                                   latitudeAndlongitude)
Example #6
    def M1809_GetData(self):
        # self_result = self.AnalyseObj.Compare2Themself(self.company_id_list[0],
        #    self.DataSource)  # self comparison
        GetDataObj = GetData(self.DataSource, self.HstPath)
        self_result = GetDataObj.Compare2Themself(self.company_id_list[0])
        b1 = GetDataObj.Compare2Industry(self.company_id_list)  # comparison against the same industry
        compare_result = GetDataObj.data_normalize(b1)  # normalized industry comparison
        if self.LocalStore == 'ON':
            SelfResultPath = os.path.join(self.OutPath, 'compare_self.csv')
            ComparePath = os.path.join(self.OutPath, 'compare_industry.csv')
            NormalizePath = os.path.join(self.OutPath, 'normalize.csv')

            self_result.to_csv(SelfResultPath, encoding='gbk')
            b1.to_csv(ComparePath, encoding='gbk')
            compare_result.to_csv(NormalizePath, encoding='gbk')
        return self_result, compare_result
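
GetData.data_normalize is not shown here. One plausible reading, sketched below as an assumption rather than the actual implementation, is a column-wise min-max normalization over a pandas DataFrame.

import pandas as pd

def data_normalize(df):
    # Scale every numeric column into [0, 1] (assumed min-max scheme)
    return (df - df.min()) / (df.max() - df.min())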
Example #7
def main(dramaname, autor, act):
    u"""Main entry point for running the program."""
    gd = GetData()
    gt = GetText()
    gs = GetSentiment()
    gm = GraphMalen()

    if not dramaname:
        dramaname = click.prompt('Enter the name of a drama')
    if not autor:
        autor = click.prompt("Enter the author's last name")
    draname = gd.eingabe_drama(dramaname, autor)
    tei = gd.get_tei(draname)
    csv_drama = gd.get_csv(draname)
    replik = gt.create_replik_dict(csv_drama)
    soup = gt.stir_the_soup(tei)
    if not act:
        print("The selected drama has {} acts".format(
            gt.how_many_acts(soup)))
        act = click.prompt(
            'Enter the act you want to analyze (choose 0 if you want the network for the whole drama)'
        )
    which_act = int(act) - 1
    if which_act == -1:
        total = gt.drama_total(soup)
        replik = gt.which_type(total, replik)
    else:
        act = gt.drama_act(soup, which_act)
        replik = gt.which_type(act, replik)
    replik = gs.get_sentis(replik)
    all_in_all = gs.average_senti(replik)
    nodes = gm.get_nodes(csv_drama)
    edges = gm.get_edges(all_in_all)
    labels_edges = gm.get_labels(edges)
    graph = gm.graph(edges, nodes)
    gm.malen(graph, labels_edges, draname, which_act + 1)
    os.system('clear')
    menu()
Example #8
def show_data(entry):
    bookName = entry.get().strip()
    if bookName == '':
        entry.delete(0, tk.END)
        entry.insert(0, '')
    else:
        book = GetData(bookName)
        book_data = book.parsed_data()
        print(book_data)
        if isinstance(book_data, tuple):
            raw_data = urllib.request.urlopen(str(book_data[5])).read()
            im = PIL.Image.open(io.BytesIO(raw_data))
            image = PIL.ImageTk.PhotoImage(im)
            label1.configure(image=image)
            label['text'] = ("Book : " + str(book_data[0]) + '\n'
                             + "Author : " + str(book_data[1]) + '\n'
                             + "Published Year : " + str(book_data[2]) + '\n'
                             + "Rating : " + str(book_data[3]) + '\n'
                             + "Total Reviews : " + str(book_data[4]))
            label1.photo = image
        else:
            label['text'] = book_data
        entry.delete(0, tk.END)
        entry.insert(0, bookName)
Example #9
 def getup(self, getDataLength):
     # get data
     data = GetData(self.driver, getDataLength).get()
     getDataLength = len(data)
     # write CSV
     today = datetime.now().strftime("%Y%m%d")
     writeTime = datetime.now().strftime("%Y%m%d%H%M%S")
     table_name = "unipos_" + today
     file = self.file_path + "unipos_" + writeTime + ".csv"
     WriteCsv(data, file).write()
     sleep(5)
     UploadCSVtoBigquery(table_name, file)
     os.remove(file)
     print("remove" + file)
     return getDataLength
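
UploadCSVtoBigquery is defined elsewhere; below is a minimal sketch using the official google-cloud-bigquery client. The dataset name is a placeholder, and schema autodetection is an assumption about the CSV contents.

from google.cloud import bigquery

def UploadCSVtoBigquery(table_name, file_path, dataset='my_dataset'):
    client = bigquery.Client()
    table_ref = client.dataset(dataset).table(table_name)
    job_config = bigquery.LoadJobConfig(
        source_format=bigquery.SourceFormat.CSV,
        autodetect=True,  # infer the schema from the CSV contents
    )
    with open(file_path, 'rb') as f:
        job = client.load_table_from_file(f, table_ref, job_config=job_config)
    job.result()  # block until the load job finishes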
Example #10
    def write_excel(self, shopList, rowIndex, latitudeAndlongitude):

        for i in range(0, len(shopList)):
            currentRow = i + 1 + rowIndex * 8
            print('Writing shop #{0} on page {1}'.format(currentRow, rowIndex + 1))

            # ##################################################################
            # Fetch the comment tags; fetching once is enough, they are the same every time
            commentList = GetData().getComments(
                shopList[i].get('restaurant').get('id'), latitudeAndlongitude)
            if commentList.get('tags') is not None:
                commentLabels = parseCommentLabels(commentList.get('tags'))
                for columnIndex in range(0, len(list(row0.values()))):
                    if commentLabels.get(list(
                            row0.keys())[columnIndex]) is not None:
                        # print('commentlabel{1}'.format(commentLabels.get(list(row0.values())[columnIndex])))
                        sheet1.write(
                            currentRow, columnIndex,
                            commentLabels.get(list(row0.keys())[columnIndex]),
                            defaultStyle)
            # ##################################################################
            # Fetch the ratings
            if commentList.get('rating') is not None:
                commentRatingLabels = commentList.get('rating')
                for columnIndex in range(0, len(list(row0.values()))):
                    if commentRatingLabels.get(list(
                            row0.keys())[columnIndex]) is not None:
                        # print('commentlabel{1}'.format(commentLabels.get(list(row0.values())[columnIndex])))
                        sheet1.write(
                            currentRow, columnIndex,
                            commentRatingLabels.get(
                                list(row0.keys())[columnIndex]), defaultStyle)
            # ##################################################################
            # Fetch the shop information
            for columnIndex in range(0, len(list(row0.values()))):
                if shopList[i].get('restaurant').get(
                        list(row0.values())[columnIndex]) is not None:
                    sheet1.write(
                        currentRow, columnIndex,
                        str(shopList[i].get('restaurant').get(
                            list(row0.values())[columnIndex])), defaultStyle)
            # ##################################################################
            # The address has to be fetched separately
            shopAddress = GetData().getInfo(
                shopList[i].get('restaurant').get('id'))
            sheet1.write(currentRow, 1, shopAddress, defaultStyle)

            f.save('test.xls')
Example #11
if osVer == "Windows":
    import externs
    conf = ConfigParser("conf/client/windows.cfg").get()
elif osVer == "Linux":
    import externs
    conf = ConfigParser("conf/client/linux.cfg").get()


def exit(signum, frame):
    externs.setWallpaper(bg)
    sys.exit()


signal.signal(signal.SIGINT, exit)

bg = externs.getWallpaper()
data = GetData(conf)
outFile = bg

while True:
    output = data.run()
    # Need serialize / deserialize methods before this is even an option
    # datFile = open(saneConf.temp + "/datout.txt", "w")
    # datFile.write(str(data))
    # datFile.close()

    if "wallpaper1.jpg" in outFile:
        outFile = "/wallpaper2.jpg"
        try:
            os.remove(saneConf.temp + "/wallpaper1.jpg")
        except OSError:
            pass
Example #12
    #    ['2006-05-27-#ubuntu-positive.tsv', ':)']
    # ]

    # load data from tsv and build data collection
    selected_features = [
        "words",
        "negative_words",
        "positive_words",
        "positive_words_hashtags",
        "negative_words_hashtags",
        "uppercase_words",
        "special_punctuation",
        "adjectives"
    ]

    dataCollection = GetData(data_class, n_per_class, training_percentage, selected_features)

    # split data collection into training and test data
    training_data = dataCollection.get_training_data()
    training_label = np.array(dataCollection.get_training_label())
 
    print('Extracting features...')

    # Get the feature matrix of this data
    print(' extracting train_data')
    training_features = dataCollection.get_training_feature_matrix()

    number_classes = len(data_class)
    
    number_of_clusters = 50
    
Example #13
import openpyxl
from openpyxl.workbook import Workbook


def getConfig(section, key):
    config = configparser.ConfigParser()
    path = os.path.split(os.path.realpath(__file__))[0] + '/config.ini'
    config.read(path)
    return config.get(section, key)


if __name__ == '__main__':
    choosen_stocks_num = int(getConfig('rrl', 'choosen_stocks_num'))
    window_len = int(getConfig('rrl', 'window_len'))
    rrl = RRL()
    getdata = GetData()
    print('train begin')
    print("============================================")
    train_data = getdata.train1()
    feed = {
        rrl.input: train_data,
        rrl.Ftminus1: getdata.Ftminus1,
        # rrl.cash: [[getdata.cash]],
        rrl.price: getdata.train_price
    }
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for i in tqdm.tqdm(range(100)):
            print('\n')
            _, sr, ft = sess.run(
                [rrl.train_step_noholder, rrl.sharpe_ratio, rrl.outputs],
Example #14
        #   'failed': 'abort_state'})
        #
        # sm.add('shelf_scan',ScanShelfState(),
        #  transitions={'succeeded':'toggle_lip_on','failed':'abort_state'})
        #
        # sm.add('toggle_lip_on', ToggleBinFillersAndTote(action='lip_on'),
        #  transitions={'succeeded':'get_kinfu_cloud',
        #   'failed': 'abort_state'})

        sm.add('get_kinfu_cloud', GetKinfuCloud(),
            transitions={'succeeded': 'crop_kinfu_cloud', 'failed': 'abort_state'})

        sm.add('crop_kinfu_cloud', ShelfBasedCropCloud(),
            transitions={'succeeded': 'get_data', 'failed': 'reset_kinfu'})

        sm.add('get_data', GetData(),
            transitions={'succeeded': 'set_the_next_bin',
                         'failed': 'reset_kinfu'})

        sm.add('abort_state', AbortState(),
            transitions={'succeeded':'succeeded'})


    # Create and start the introspection server
    #  (smach_viewer is broken in indigo + 14.04, so no need for that now)
    sis = smach_ros.IntrospectionServer('server_name', sm, '/SM_ROOT')
    sis.start()

    # run the state machine
    #   We start it in a separate thread so we can cleanly shut it down via CTRL-C
    #   by requesting preemption.
Example #15
 def test_search_pokemon_by_number(self):
     my_data = GetData()
     pokemon = my_data.get('1')
     my_pokemon = pokemon.json()
     self.assertEqual(200, pokemon.status_code)
     self.assertEqual(my_pokemon['name'], 'bulbasaur')
Example #16
        ['../Data/Twitter/positive_tabed.tsv', 1, ':('],
    ]

    # load data from tsv and build data collection
    selected_features = [
        "words",
        "negative_words",
        "positive_words",
        "positive_words_hashtags",
        "negative_words_hashtags",
        "uppercase_words",
        "special_punctuation",
        "adjectives"
    ]

    dataCollection = GetData(data_class, n_per_class, training_percentage, selected_features, is_bootstrap=False)

    # split data collection into training and test data
    training_data = dataCollection.get_training_data()
    training_label = dataCollection.get_training_label()
    test_data = dataCollection.get_test_data()
    test_label = dataCollection.get_test_label()

    print('\nExtracting features..')
    training_features = dataCollection.get_training_feature_matrix()
    test_features = dataCollection.get_test_feature_matrix()

    net = perceptron.Perceptron(n_iter=iteration, verbose=1, random_state=None,
                                shuffle=False, class_weight='auto', eta0=0.0002)

    net.fit(training_features, training_label)
Example #17
#!/usr/bin/python
#coding:utf-8
from getData import GetData
from sendData import Sender
import datetime

url = "http://192.168.1.5:8000/eq/equip_api/"  #服务器的ip及提交的函数
try:
    data = GetData()
    sendData = data.getData()

    sender = Sender(url, sendData)  # send the collected data to the given URL
    sender.get_request()
    response = sender.get_response()
    print(response)
except Exception as e:  # on failure, append the error to the log file
    with open("/opt/CMDB/log.txt", "a+") as f:
        time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        content = "[%s]:%s\n" % (time, str(e))
        f.write(content)
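
The Sender class imported from sendData is not shown. Below is a minimal sketch consistent with how this example and Example #18 call it (POST the payload, keep the response); the use of requests and a JSON body are assumptions.

import requests

class Sender:
    def __init__(self, url, data, headers=None):
        self.url = url
        self.data = data
        self.headers = headers or {}
        self.response = None

    def get_request(self):
        # POST the collected data to the configured URL (assumed transport)
        self.response = requests.post(self.url, json=self.data, headers=self.headers)

    def get_response(self):
        return self.response.text if self.response is not None else None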
Example #18
import sys
import requests, json


# URL addresses
url = 'http://192.168.1.222:8000/servers/' + sys.argv[1] + '/'
loginurl = "http://192.168.1.222:8000/login/"

loginData = {
"username":"******",
"password":"******"
}

# Call the login endpoint to obtain a JWT token
getlogin = GetToken(loginurl,loginData)
token = getlogin.getres()
headers = {
  'content-type': 'application/json',
  "Authorization":"JWT "+token}

# Collect the data
mydata = GetData()
sendData = mydata.getData()
# Send the data
sender = Sender(url,sendData,headers)
sender.get_request()
response = sender.get_response()

# Print the response
print(response)
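
GetToken is likewise not shown. A minimal sketch consistent with the JWT login flow above is below; the 'token' field name in the JSON response is an assumption.

import requests

class GetToken:
    def __init__(self, url, data):
        self.url = url
        self.data = data

    def getres(self):
        # POST the credentials and pull the token out of the JSON body
        resp = requests.post(self.url, data=self.data)
        return resp.json().get('token', '')  # 'token' key is an assumption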