Example #1
def main():
    pair = "EUR_USD"
    timeframe = "H1"

    # ignore the last datapoint since that candle is incomplete

    modelPath = 'models/EUR_USD_H1_lstm.h5'
    model = load_model(modelPath)

    data = getData(pair, timeframe, 150)

    predictions = []
    for i in range(100):
        inputs = data[i:50 + i]

        actual = data[50 + i]
        print("Inputs:\n", inputs)
        normalizedInput, p0 = normalize(inputs)
        reshapedInput = np.reshape(normalizedInput, (1, 50, 1))

        normalizedPrediction = predict(model, reshapedInput)
        #print "Normalized Prediction:\n", normalizedPrediction

        denormalizedPrediction = denormalize(normalizedPrediction, p0)
        #print "Denormalized Prediction:\n", denormalizedPrediction

        predictions.append((str(denormalizedPrediction), (inputs[49], actual)))
    for prediction in predictions:
        print("Prediction:", prediction[0], "Actual:", prediction[1][1],
              "Previous:", prediction[1][0], "Direction:",
              directionTrue(prediction[1][1], prediction[1][0], prediction[0]))
    numDirectionTrue(predictions)
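
The example above relies on normalize/denormalize helpers that are not shown. A minimal sketch, assuming each 50-candle window is scaled relative to its first price p0 (a common LSTM preprocessing step); the implementation is a guess, not the source's code:

import numpy as np

def normalize(window):
    # scale a price window relative to its first value p0
    p0 = window[0]
    return np.array(window) / p0 - 1.0, p0

def denormalize(normalized_value, p0):
    # invert the scaling to recover an absolute price
    return (normalized_value + 1.0) * p0
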
def single(routeid):
    data, MinValue = getData(routeid)
    if len(data) < 50:
        return 0
    #print('total:',len(data_df),'train:',len(train_df),'test:',len(test_df))
######
    Features, Label, TestFeatures, TestLabel, Label_NUM = preprocess(data)
###### building model
    model = Sequential()
    model.add(Dense(units=32,input_dim=3,kernel_initializer='uniform'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))

    model.add(Dense(units=32,kernel_initializer='uniform'))
    #model.add(BatchNormalization())
    model.add(Activation('relu'))
    #model.add(Dense(units=16,kernel_initializer='uniform',activation='relu'))
    #model.add(Dense(units=16,kernel_initializer='uniform',activation='relu'))
    #model.add(Dense(units=4,kernel_initializer='uniform',activation='relu'))
    #model.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
######

###### training

    model.add(Dense(units=Label_NUM,kernel_initializer='uniform'))
    #model.add(BatchNormalization())
    model.add(Activation('softmax'))
    # a softmax over Label_NUM classes calls for categorical, not binary, cross-entropy
    model.compile(loss='categorical_crossentropy',optimizer='Adagrad',metrics=['accuracy'])
    train_history = model.fit(x=Features,y=Label,validation_split=0.5,epochs=50,batch_size=50,verbose=2)
######
    path = 'models/' + routeid + '.h5'
    model.save(path)
Example #3
def login():
    username, password = gd.getData()                   #get username,password from getData function module
    if vp.verifyPassword(username, password):           #verifies password
        print('Successfully Logged in')
        return True
    else:
        print("Invalid username or password")
        return False
def test_api_request():
    if 'credentials' not in session:
        return redirect('authorize')
    events_result = get_event()
    events = events_result.get('items', [])
    t = getData.getData(events)
    return jsonify(**t)
def daily_update_loop():
    # start session with database
    Session = sessionmaker(bind=engine)
    session = Session()

    week = 1  # TODO: find a way of deriving the week of term from the date
    term = 1  # TODO: find a way of deriving the current term from the date

    # loop through each day of the week and update the predicted value on that day
    for day in range(1, 6):
        predData = getData(term, week, day)
        for i, time in enumerate(times):
            data = session.query(Data).filter_by(day=days[day - 1],
                                                 time=time).first()
            # for now, update with the average of the minimum and maximum
            data.jnr_expected = predData["Jnr"][i]
            data.snr_expected = predData["Snr"][i]

    session.commit()

    # find next time until update
    current = datetime.datetime.now()
    new = current.replace(
        day=current.day, hour=1, minute=0, second=0,
        microsecond=0) + datetime.timedelta(days=1)
    secs = (new - current).total_seconds()

    # wait for that difference in time
    threading.Timer(secs, daily_update_loop).start()
Example #6
def pretreQues(question):
    new_question = question
    corpus, documents_tf = textBuild.unknown()
    vocabulary, documents = textBuild.clean_words(corpus,
                                                  documents_tf=documents_tf)
    getdata = getData.getData()
    stop_words = getdata.read_stopword()
    que_list = jieba.cut(new_question)  # domain-specific terms may need to be added first, or jieba will fail to segment them
    que_list = " ".join(que_list)
    que_list = que_list.split(" ")
    List = []
    for word in que_list:
        if not (word.strip() in stop_words) and len(word.strip()) > 1:
            List.append(word.lower())

    isin = []
    for word in List:
        if word in vocabulary:
            #        print(True)
            isin.append(0)
        else:
            #        print(False)
            isin.append(1)


    #    print(isin)
    if sum(isin) == 0:
        new_document = []
        for word in List:
            word_loc = vocabulary.index(word)
            new_document.append(word_loc)
        return new_document
    else:
        print("No match found in the vocabulary")
Example #7
def indexId(stId):
    if not current_user.is_authenticated:
        return redirect(url_for('login'))
    aa = stId.split("-")
    name = getID.getName(aa[0])
    data = getData.getData(aa[0])
    datatoday = getData.getTodayCsv(aa[0])
    datalive = getData.getLive(aa[0])
    dataTec = getData.getAll(aa[0])
    dataFin = getData.getAllFin(aa[0])
    dataPre = getData.getPreByDay(aa[0], 10)
    dataNews = getData.getNewsS(aa[0], n)
    dataFav = accountSql.getlike(current_user.id)
    dataInd = accountSql.getIde(current_user.id)
    dataArt = accountSql.getArtcile(aa[0])
    dataReArt = accountSql.getReArtcile(aa[0])
    lenArt = len(dataArt)
    toArt = "0"
    inArt = "0"
    if len(aa) == 2:
        toArt = aa[1]
    elif len(aa) == 3:
        toArt = aa[1]
        inArt = aa[2]
    return render_template('index.html', stock=aa[0], name=name, re=data,
                           today=datatoday, tec=dataTec, fin=dataFin,
                           pre=dataPre, news=dataNews, n=n, reFav=dataFav,
                           live=datalive, ind=dataInd, art=dataArt,
                           lenArt=lenArt, reArt=dataReArt, toArt=toArt,
                           inArt=inArt)
Example #8
def getIngrident(name):
    ItemList = []
    response = table.query(KeyConditionExpression=Key('recipe_name').eq(name))

    for items in response['Items']:
        for key in items:
            if ('Ingredient' in key):
                ItemList.append(items[key])

    cartList = getData(ItemList)

    result = ""
    missed = " ingredients that are not available are "
    miss_product = ""
    for product in cartList:
        if (cartList[product].ItemNumber == 0):
            miss_product += product + ", "
        else:
            result += cartList[product].Description + " of brand " + cartList[
                product].Brand + " and price $" + str(
                    cartList[product].Price) + ", "

    #item = response['Item']
    if (miss_product == ""):
        missed = ""
    else:
        missed += miss_product
        # result+missed
    return result + missed
def RunDenseNet(batch_size, nb_epoch, depth, nb_dense_block, nb_filter,
                growth_rate, dropout_rate, weight_decay):
    (X_train, Y_train), (X_test, Y_test), nb_classes, img_dim = getData.getData()
    model = DenseNet.DenseNet(nb_classes, img_dim, depth, nb_dense_block,
                              growth_rate, nb_filter, dropout_rate,
                              weight_decay)
    model.summary()
    """Paper Suggests using SGD"""
    opt = SGD(lr=0.0, momentum=0.9, nesterov=True, decay=weight_decay)  # lr=0.0 is a placeholder; the scheduler below sets it each epoch

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=["accuracy"])
    """Custom learning Schedule"""
    lrs = LearningRateScheduler(custom_LR, verbose=1)
    print("Training")
    model.fit(x=X_train,
              y=Y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              callbacks=[lrs],
              verbose=2)
    print("Evaluating")
    scores = model.evaluate(X_test, Y_test, batch_size=64, verbose=2)
    print('Test Loss: ', scores[0])
    print('Test Accuracy: ', scores[1])
    model.save("model.h5")
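
The custom_LR schedule passed to LearningRateScheduler above is not shown. A minimal sketch, assuming the DenseNet paper's schedule (start at 0.1, divide by 10 at 50% and 75% of training); the total epoch count is a placeholder:

def custom_LR(epoch):
    # hypothetical step schedule: 0.1, then /10 at 50% and 75% of training
    nb_epoch = 300  # assumed total; keep in sync with the value passed to RunDenseNet
    if epoch < nb_epoch * 0.5:
        return 0.1
    elif epoch < nb_epoch * 0.75:
        return 0.01
    return 0.001
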
Example #10
def getDailyData():
    # error trapping: inputs, get data
    try:
        # parse year, month, day
        curDate = mainDate.get().strip().split("-")
        year = int(curDate[0])
        month = int(curDate[1])
        day = int(curDate[2])

        getDataObj = getData.getData(mainCompany.get().upper(), year, month,
                                     day, year, month, day)
        getDataObj.getDailyStockData()
        curStock = getDataObj.data
    except Exception:
        # show warning message
        messagebox.showwarning("ERROR", "Invalid SYMBOL/DATE")
    else:
        if curStock == []:
            # show warning message
            message = "Market not open on " + mainDate.get().strip()
            messagebox.showwarning("ERROR", message)
        else:
            report = "DATE" + ": " + str(curStock[0]) + "\n" + \
                    "OPEN" + ": " + str(curStock[1]) + "\n" + \
                    "HIGH" + ": " + str(curStock[2]) + "\n" + \
                    "LOW" + ": " + str(curStock[3]) + "\n" + \
                    "CLOSE" + ": " + str(curStock[4]) + "\n" + \
                    "VOLUME" + ": " + str(curStock[5])

            # show the daily report
            messagebox.showinfo(mainCompany.get().upper(), report)
Example #11
def predict(routeid, C, time, rainfall):
    path = '/home/user/GraduationProject/models/' + routeid + '.h5'
    if os.path.isfile(path):
        model = load_model(path)
        data, MinValue = getData(routeid)
        test = pd.Series([C, time, rainfall, MinValue])
        newdata = pd.DataFrame(
            [list(test)],
            columns=["C", "datacollecttime", "rainfall", "value"])
        data = pd.concat([data, newdata])
        Features, Label, Label_NUM = preprocess(data)
        #scores = model.evaluate(x=Features,y=Label)
        #print ('scores:',scores[1])

        p = model.predict(Features[-1:])
        temp = MinValue * 10
        pro = 0
        for i in p[0]:
            #print("i:",i,"temp:",temp)
            pro = pro + i * temp
            temp += 10
        print(routeid, pro)
    else:
        print(routeid, "NULL")
    print(" ")
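
The loop above computes the probability-weighted mean of the class bins: class i stands for the value (MinValue + i) * 10, so with p[0] = [0.2, 0.5, 0.3] and MinValue = 3 the prediction is 0.2*30 + 0.5*40 + 0.3*50 = 41. The same computation written directly (a sketch, not from the source):

pro = sum(p_i * (MinValue + i) * 10 for i, p_i in enumerate(p[0]))
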
Example #12
def originData():
    getdata = getData()
    data = getdata.read_text()
    stop_words = getdata.read_stopword()
    doc_content = data
    dict_data = doc_content.split("\n")
    dict_data.remove("")
    return dict_data, stop_words
Example #13
def login():
    uname, password = gd.getData(2)
    flag = lgv.loginValidate(uname, password)  #validation of user
    if flag:
        print("Successfully logged in")
        return True
    else:
        print("Invalid credentials")
        return False
Example #14
def getDataAndToken(opt):
    '''
    Get the already-processed token list produced by the crawler.
    '''
    cnt, title, desc = data.getData(opt)
    d_tokens = []
    for d in desc:
        d_nopunc = token.HandlePunctuation(d)
        d_tokens += token.getToken(d_nopunc)
    return d_tokens
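
The token helpers above are not shown. A hypothetical sketch, reusing jieba (which other examples here already depend on) for segmentation; the names mirror the calls above, but the implementation is a guess:

import re
import jieba

class token:
    @staticmethod
    def HandlePunctuation(text):
        # replace punctuation with spaces so only word characters remain
        return re.sub(r"[^\w\s]", " ", text)

    @staticmethod
    def getToken(text):
        # segment with jieba and drop empty tokens
        return [w for w in jieba.cut(text) if w.strip()]
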
Example #15
    def test_search_valid_pokemon(self):
        my_data = getData()
        pokemon = my_data.get('kakuna')
        my_pokemon = pokemon.json()
        with open('kakuna.json') as kakuna:
            data = json.load(kakuna)

        self.assertEqual(data['weight'], my_pokemon['weight'])

        for i in my_pokemon['moves']:
            #print(i['move']['name'])
            self.assertTrue(i['move']['name'] in data['moves'])
Example #16
 def run(*args):
     print("connected.....")

     while True:
         wind_tenBit = 0
         Ubatt_tenBit = 0
         Ibatt_tenBit = 0

         # average several ADC readings per channel
         for i in range(config.times):
             wind_tenBit = wind_tenBit + getData.getData(config.windChannel)
             Ubatt_tenBit = Ubatt_tenBit + getData.getData(config.UbattChannel)
             Ibatt_tenBit = Ibatt_tenBit + getData.getData(config.IbattChannel)

             time.sleep(config.delay / config.times)

         wind_tenBit = wind_tenBit / config.times
         Ubatt_tenBit = Ubatt_tenBit / config.times
         Ibatt_tenBit = Ibatt_tenBit / config.times

         # send the averaged readings to the server in JSON format
         ws.send('{"pw":"' + config.password + '", "data":[{"wind":' + str(wind_tenBit) +
                 ',"Ubatt":' + str(Ubatt_tenBit) + ',"Ibatt":' + str(Ibatt_tenBit) + '}]}')
def register_user():
    uname, password, name, email, contact = gd.getData(1)
    amount = int(input("Enter amount to be deposited "))
    f = open('UserData.txt', 'a+')
    f1 = open('Backup.txt', 'a+')
    f.write(uname + " " + password + " " + name + " " + email + " " + contact +
            " " + str(amount) + "\n")  #save details in UserData.txt file
    f1.write(uname + " " + password + " " + name + " " + email + " " +
             contact + " " + str(amount) +
             "\n")  #create a Backup file concurrently
    f.close()
    f1.close()
    print("Successfully registered ")
    return True
def evaluate():

    batches_in_epoch = batch_size * 10
    accuracy = 0

    for i in range(batches_in_epoch):
        nextX, nextY, objCoords, _ = getData(batch_size, datasetName, img_size, img_size, hasLabel, maxNumObj)

        feed_dict = {inputs_placeholder: nextX, labels_placeholder: nextY,
                     onehot_labels_placeholder: dense_to_one_hot(nextY)}
        r = sess.run(reward, feed_dict=feed_dict)
        accuracy += r

    accuracy /= batches_in_epoch
    print("ACCURACY: " + str(accuracy))
Example #19
def input(runSettingIn, populationSizeIn, filename):
    global runSetting
    global populationSize
    global C
    global N
    runSetting = runSettingIn
    populationSize = populationSizeIn

    tab, N, C = getData(filename)

    for i in range(0, N):
        Weight.append(tab[i][0])
        Profits.append(tab[i][1])
def relocate(name):
    g = geocoder.google(name)
    if g.status == 'OK':
        landslide_map = getData([g.southwest, g.northeast])
        currentMap[0] = landslide_map
        return render_template("layout.html",
                               view_map=landslide_map,
                               location=name)
    else:
        print("Going to the Default")
        return render_template("layout.html",
                               view_map=currentMap[0],
                               location='Not found',
                               error="Could not locate {}".format(name))
def verbaDeputado(numId):
	url = 'http://dadosabertos.almg.gov.br/ws/prestacao_contas/verbas_indenizatorias/legislatura_atual/deputados/'+str(numId)+'/datas?formato=json'

	deputado = getData(url)["list"]

	for dataVerba in deputado:
		# dataReferencia["$"] is a date string like "2020-01"; take the year and month
		ano = dataVerba["dataReferencia"]["$"][0:4]
		mes = dataVerba["dataReferencia"]["$"][5:7]

		# strip a leading zero from the month
		if mes[0] == "0":
			mes = mes[1]

		url2 = 'http://dadosabertos.almg.gov.br/ws/prestacao_contas/verbas_indenizatorias/deputados/'+str(numId)+'/'+str(ano)+'/'+str(mes)+'?formato=json'
		print(url2)
Example #22
	def getValores(self):
		saidaValores = []
		pegaValores = getData()

		for cond in self.getConditions():
			for c in cond:
				#print("Condition:", c)
				aplicaoExecutar = conexaoMySql.consulta(self, "SELECT * FROM aplications WHERE idaplications = " + str(c[1]))
				horaInicio = datetime.now() - timedelta(seconds=aplicaoExecutar[0][4])
				#print("start time", horaInicio)
				valorDaCondicao = pegaValores.pegaDados(c, horaInicio)
				saidaValores.append({"condicao": c, "valor": valorDaCondicao})

		del pegaValores
		return saidaValores
Example #23
def login():
    """View function for login view."""
    logger.info('Logging in')

    params = request.get_json()
    username = params.get('username', None)
    password = params.get('password', None)

    if not username:
        return jsonify({"msg":
                        "Missing username parameter"}), Status.HTTP_BAD_REQUEST
    if not password:
        return jsonify({"msg":
                        "Missing password parameter"}), Status.HTTP_BAD_REQUEST
    try:
        db = MySQLdb.connect(
            host="localhost",  # your host 
            user="******",  # username
            passwd="root",  # password
            db="traffic")
        query = "select password from users where username='******'"
        cur = db.cursor()
        cur.execute(query)
        count = cur.rowcount
        if count == 0:
            logger.info("Login Failed Due to missing username")
            return jsonify({"msg": "Incorrect Username"
                            }), Status.HTTP_BAD_UNAUTHORIZED
        data = cur.fetchall()
        if password != data[0][0]:
            logger.info("Login Failed Due to incorrect password")
            return jsonify({"msg": "Incorrect password"
                            }), Status.HTTP_BAD_UNAUTHORIZED

        ret = {
            'jwt': create_jwt(identity=username),
            'exp': datetime.utcnow() + current_app.config['JWT_EXPIRES']
        }
        logger.info('Getting data from S3 bucket')
        global trafficData
        trafficData = getData.getData()
        logger.info("Data retrieval successful")
        return jsonify(ret), Status.HTTP_OK_BASIC
    except Exception:
        logger.info('Failed')
        return jsonify({"msg": "Server Error"}), Status.HTTP_BAD_REQUEST
Example #24
def main():
    dataset = Dataset()
    counts = {
        'cpvs_not_found': 0,
        'it': 0,
        'not_it': 0
    }


    idsDictionary = getIds('jan2020.csv')
    for date in idsDictionary:
        ids = idsDictionary[date]

        for id in ids:
            data = getData(id)
            # soup = BeautifulSoup(data, 'html.parser')
            # cpvs = extractCPVsFromSoup(soup)
            cpvs = extractCPVsFromData(data)
            # print('Found CPVs:', cpvs)

            if cpvs is not None:
                if containsITCPV(cpvs):
                    investments = extractInvestmentsFromData(data)
                    print('Adding document to dataset...')
                    dataset.addEntry({
                        'id': id,
                        'cpv': cpvs,
                        'date': date,
                        'investments': investments
                    })
                    counts['it'] += 1
                else:
                    counts['not_it'] += 1
            else:
                # count unknown CPVs and keep going rather than aborting the run
                print('CPVs NOT FOUND ' + id)
                counts['cpvs_not_found'] += 1

    print('Results:')
    print('IT Licitations Found: ' + str(counts['it']))
    print('Not-IT Licitations Found: ' + str(counts['not_it']))
    print('Licitations with unknown CPVs: ' + str(counts['cpvs_not_found']))

    print('Exporting dataset...')
    dataset.exportAsCSV('./data.csv')
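
The containsITCPV helper above is not shown. A minimal sketch, assuming IT purchases are identified by the EU CPV divisions 48 (software packages and information systems) and 72 (IT services); the division list is an assumption about the helper's intent:

IT_CPV_DIVISIONS = ("48", "72")

def containsITCPV(cpvs):
    # true if any CPV code falls in an IT division
    return any(str(cpv).strip().startswith(IT_CPV_DIVISIONS) for cpv in cpvs)
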
def regDeputado(numId):
	url = 'http://dadosabertos.almg.gov.br/ws/deputados/'+str(numId)+'?formato=json'
	registroDeputado = getData(url)["deputado"]

	#print("\n\tSocial networks\n")
#	output("saida.out", "\n\tRedes Sociais\n")

#	dicio = {}

	for redeSoc in registroDeputado["redesSociais"]:
		print("\t\t", redeSoc["redeSocial"]["nome"], "-", redeSoc["url"])
#		dicio.update({redeSoc["redeSocial"]["nome"]:redeSoc["url"]})
#		saida = "\t\t"+redeSoc["redeSocial"]["nome"]+" - "+redeSoc["url"]+"\n"
#		output("saida.out", saida.encode('utf-8'))

		saida = "\t\t{\n\t\t\"redeSocial\": {\n\t\t\t\"nome\": \""+redeSoc["redeSocial"]["nome"]+"\",\n\t\t\t\"url\": \""+redeSoc["url"]+"\",\n\t\t},\n\t}"
		output("teste.out", saida.encode('utf-8'))
Example #27
def main():
    default_threshold_PCA = cf.config("default_threshold_PCA")
    default_threshold_label = cf.config("default_threshold_label")
    print("---------------------------------------------------------------------")
    mode = input("Mode? 'e'=estimateAccuracy, 't'=testing, 'g'=getData -> ")
    print("---------------------------------------------------------------------")
    # estimateAccuracy mode
    if mode == "e":
        flag_PCA = input(
            "Principal Component Analysis? 'y'=yes, [press enter]=no -> ")
        if flag_PCA == "y":
            threshold_PCA = input(
                "PCA feature number (1 to 37)? [press enter]=default -> ")
            if threshold_PCA == "": threshold_PCA = default_threshold_PCA
            tr.estimateAccuracy(True, float(threshold_PCA))
        else:
            tr.estimateAccuracy(False, 1.1)
    # getData mode
    elif mode == "g":
        fileName = input("File name for saving data? -> ")
        gd.getData(fileName, mode, False, 1.1, 0)
    # test mode
    elif mode == "t":
        flag_PCA = input(
            "Principal Component Analysis? 'y'=yes, [press enter]=no -> ")
        if flag_PCA == "y":
            threshold_PCA = input(
                "PCA feature number (1 to 37)? [press enter]=default -> ")
            if threshold_PCA == "": threshold_PCA = default_threshold_PCA
            threshold_label = input(
                "Label value threshold for prediction? [press enter]=default -> ")
            if threshold_label == "":
                threshold_label = default_threshold_label
            fileName = input("File name for saving data? -> ")
            gd.getData(fileName, mode, True, float(threshold_PCA),
                       int(threshold_label))
        else:
            threshold_label = input(
                "Label value threshold for prediction? [press enter]=default -> ")
            if threshold_label == "":
                threshold_label = default_threshold_label
            fileName = input("File name for saving data? -> ")
            gd.getData(fileName, mode, False, 1.1, int(threshold_label))
    # wrong mode
    else:
        print("wrong mode... program terminated")
def main():
	pair = "USD_JPY"
	timeframe = "H1"
	data = getData(pair, timeframe, 51)

	# ignore the last datapoint since that candle is incomplete
	inputs = data[0:50]
	print("Inputs:\n", inputs)

	modelPath = 'models/lstm.h5'
	model = load_model(modelPath)

	normalizedInput, p0 = normalize(inputs)
	reshapedInput = np.reshape(normalizedInput, (1, 50, 1))

	normalizedPrediction = predict(model, reshapedInput)
	print("Normalized Prediction:\n", normalizedPrediction)

	denormalizedPrediction = denormalize(normalizedPrediction, p0)
	print("Denormalized Prediction:\n", denormalizedPrediction)
Example #29
def TrimmingFile():
    """
    Purpose:
        Trim Koustav's Data file to get a list of dates
    Pre-conditions:
        Data files and program to read in data files must exist
        Current data files include
            03_10_12_diff_RKN_15_21_ut_all_data.txt     ; many different days
    Post-conditions:
        Output a file with just the days
    Return:
        none
    """

    FILE = "03_10_12_diff_RKN_15_21_ut_all_data.txt"
    SAVE_PLOTS = False

# < -- GET DATA -- >
    my_data = getData(FILE)
    month = np.asarray(my_data[0])
    day = np.asarray(my_data[1])
Example #30
def loadCompany():
    global loaded, dataList, fullDateList, dateList, openList, highList, lowList, closeList, volumeList, year

    # error trapping: inputs, get data
    try:
        # get and save data
        getDataObj = getData.getData(mainCompany.get().upper(),
                                     int(mainYear.get()), 1, 1,
                                     int(mainYear.get()), 12, 31)
        getDataObj.getStockData()
        dataList = getDataObj.dataList
        year = dataList[0][0][:4]
    except Exception:
        # show warning message
        messagebox.showwarning("ERROR", "Invalid SYMBOL/YEAR")
    else:
        # insert datalist into DB table
        dbTable.insertAllData(dataList)

        # parse data into separate lists
        fullDateList = []
        dateList = []
        openList = []
        highList = []
        lowList = []
        closeList = []
        volumeList = []

        for x in dataList:
            fullDateList.append(x[0])
            dateList.append(x[0].replace("-", ".")[5:7])
            openList.append(x[1])
            highList.append(x[2])
            lowList.append(x[3])
            closeList.append(x[4])
            volumeList.append(x[5])

        # print to console
        loaded = True
        print("\nData Loaded:", mainCompany.get().upper())
Example #31
def gS(filename, verbose, num_conv):
    # create output file
    outStats = open(r'output_files/out_stats.csv', 'w')
    # write header
    outStats.write("conv_id,#_messages,#_authors\n")
    # read data in from xml
    data = getData(filename)
    # list of all conversations in the data set
    conversations = range(1, 2489)
    # determine how many conversations to do the analysis on
    if num_conv is None:
        num_conv = len(conversations)
    for i in range(1, num_conv + 1):
        # iterate through the requested number of conversations
        authors = {}
        num_messages = 0
        for msg in data[4:]:
            # if not the number we want, skip to next message
            if (int(msg.items()[0][1]) != i): continue
            # if we have passed the message, break for efficiency
            if (int(msg.items()[0][1]) > i): break
            # increment number of messages
            num_messages += 1
            # author of the message is given in the first index
            auth = msg[1].text
            if (auth not in authors):
                authors[auth] = 1
            else:
                authors[auth] += 1
        # number of authors is given by the number of keys in the authors dict, which are all unique
        num_authors = len(authors.keys())
        # write to output file
        outStats.write("{:d},{:d},{:d}\n".format(i, num_messages, num_authors))
        # optional command line output.
        if (verbose):
            print("Conversation Number {}:".format(i))
            print("{} total message(s)".format(str(num_messages)))
            print("{} total author(s)".format(str(num_authors)))
            print("End Conversation {}.".format(i))
            print("")
    outStats.close()
def getMap(artistName, n):
    ngramMap = {}
    rhymeMap = {}
    lyrics = getData.getData(artistName)
    totalSyllables = 0
    numLines = 0
    rhymeWord = "not chosen yet"
    lines = lyrics.split('\n')
    numLines += len(lines)
    for line in lines:
        #line = unidecode.unidecode(line)
        words = line.split()
        if len(words) == 0: continue
        for i in range(0, len(words)):
            prevWords = [None for x in range(0, n)]
            for j in range(1, n + 1):
                if i - j >= 0:
                    prevWords[-j] = words[i - j]
            totalSyllables += nsyl(words[i])[0]
            ngram = tuple(prevWords)
            if ngram in ngramMap:
                ngramMap[ngram].append(words[i])
            else:
                ngramMap[ngram] = [words[i]]
        if rhymeWord == "not chosen yet":
            rhymeWord = words[-1]
        else:
            if rhymeWord in rhymeMap:
                rhymeMap[rhymeWord].append(words[-1])
            else:
                rhymeMap[rhymeWord] = [words[-1]]
            if words[-1] in rhymeMap:
                rhymeMap[words[-1]].append(rhymeWord)
            else:
                rhymeMap[words[-1]] = [rhymeWord]
            rhymeWord = "not chosen yet"
    avgSyllables = totalSyllables / float(numLines)
    return ngramMap, rhymeMap, avgSyllables
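
getMap depends on an nsyl helper that is not shown; the [0] indexing above implies it returns a list with one syllable count per pronunciation. A sketch assuming the usual CMU pronouncing dictionary approach (requires nltk.download('cmudict')):

from nltk.corpus import cmudict

pronunciations = cmudict.dict()

def nsyl(word):
    # vowels in the CMU dictionary carry a stress digit, so counting them counts syllables
    prons = pronunciations.get(word.lower(), [["AH0"]])  # assume one syllable for unknown words
    return [len([ph for ph in pron if ph[-1].isdigit()]) for pron in prons]
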
def rP(filename, verbose, num_conv):
    # create output files
    outQs = open(r'output_files/questions_found.csv', 'w')
    outQs.write("conv_id,#_questions\n")
    outTxtSpk = open(r'output_files/txtspk_found.csv', 'w')
    outTxtSpk.write("conv_id,#_txt_abbreviations\n")
    # read data in from xml
    data = getData(filename)
    # group data by conversation id
    conversations = {}
    for message in data[4:]:  # ignore starting info
        conv_id = int(message.items()[0][1])  # conversation number
        if (conv_id not in conversations):
            conversations[conv_id] = [message[2].text]
        else:
            conversations[conv_id] = conversations[conv_id] + [message[2].text]
    if num_conv is None:
        num_conv = len(conversations.keys())
    for i in range(1, num_conv + 1):
        if (verbose): print("Conversation Number {}:".format(i))
        messages = conversations[i]
        # check for questions
        result = checkFor(messages, r'.*\?.*')
        num_questions = sum(result)
        outQs.write(str(i) + "," + str(num_questions) + "\n")
        if (verbose): print("{} question(s)".format(num_questions))
        # check for text speak
        result = [0] * len(messages)  # one counter per message in this conversation
        for reg in text_speak:
            temp = checkFor(messages, reg, ignorecase=True)
            result = [sum(x) for x in zip(result, temp)]
        num_text_speak = sum(result)
        outTxtSpk.write(str(i) + "," + str(num_text_speak) + "\n")
        if (verbose): print("{} text abbreviation(s)".format(num_text_speak))
        if (verbose): print("End Conversation {}.".format(i))
        if (verbose): print("")
    outQs.close()
    outTxtSpk.close()
 def __init__(self):
     tk.Tk.__init__(self)
     data = gD.getData('*', 'customerPerformanceInfo')
     rowN = 1
     for n in data:
         rowN += 1
     t = SimpleTable(self, rowN, 6)
     t.pack(side="top", fill="x")
     t.set(0, 0, 'SessionID')
     t.set(0, 1, 'CustomerID')
     t.set(0, 2, 'Start Session')
     t.set(0, 3, 'End Session')
     t.set(0, 4, 'fitness Device')
     t.set(0, 5, 'burnt calories')
     x = 1
     y = 0
     for i in data:
         for r in i:
             t.set(x, y, r)
             y += 1
             if y == 6:
                 x = x + 1
                 y = 0
Example #36
def prepareData():
    """
    function to prepare padded sequences of titles concatenated with texts for training and testing
    the dates and subjects of the news have been excluded, as they do little to explain whether the news is real or fake
    :return:
        trainPad: numpy array of padded sequences of titles followed by texts for training
        trainLabels: numpy array of training labels [0 for real news, 1 for fake news]
        testPad: numpy array of padded sequences of titles followed by texts for testing
        testLabels: numpy array of testing labels [0 for real news, 1 for fake news]
    """

    from keras.preprocessing.text import Tokenizer
    from keras.preprocessing.sequence import pad_sequences

    [[trainData, trainLabels], [testData, testLabels]] = getData(trainSize=0.9)
    categories = list(trainData)

    # Tokenize title and text
    tt_tokenizer = Tokenizer(num_words=parameters["vocab_size"])
    tt_tokenizer.fit_on_texts(trainData['title'] + " : " +
                              trainData['text'])

    trainSeq = tt_tokenizer.texts_to_sequences(trainData['title'] + " : " +
                                               trainData['text'])
    trainPad = pad_sequences(trainSeq,
                             maxlen=parameters["max_len"],
                             padding=parameters["padding"],
                             truncating=parameters["truncate"])

    testSeq = tt_tokenizer.texts_to_sequences(testData['title'] + " : " +
                                              testData['text'])
    testPad = pad_sequences(testSeq,
                            maxlen=parameters["max_len"],
                            padding=parameters["padding"],
                            truncating=parameters["truncate"])

    return [[np.array(trainPad), np.array(trainLabels)],
            [np.array(testPad), np.array(testLabels)]]
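
A hypothetical usage sketch for the arrays returned above; the model layout is an assumption for illustration, not part of the source:

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

[[trainPad, trainLabels], [testPad, testLabels]] = prepareData()

model = Sequential([
    Embedding(parameters["vocab_size"], 64, input_length=parameters["max_len"]),
    LSTM(32),
    Dense(1, activation='sigmoid'),  # 0 = real news, 1 = fake news
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(trainPad, trainLabels, validation_data=(testPad, testLabels), epochs=3)
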
def infoDeputado(status):
	cont = 0
	if status == 1:
		listaDeputado = getData('http://dadosabertos.almg.gov.br/ws/deputados/situacao/1?formato=json')["list"]

		lista = []
		dicio = {"lista": lista}

		for deputado in listaDeputado:
			print(deputado["nome"], "-", deputado["partido"])
#			saida = deputado["nome"]+" - "+deputado["partido"]
#			output("saida.out", saida.encode('utf-8'))
#			lista.append(regDeputado(deputado["id"])

			saida = "{\n\t\"nome\": \""+deputado["nome"]+"\",\n\t\"partido\": \""+deputado["partido"]+"\",\n\t\"redesSociais\": ["
			output("teste.out", saida.encode('utf-8'))
			regDeputado(deputado["id"])

			saida = "\t],\n},\n"
			output("teste.out", saida.encode('utf-8'))
#			verbaDeputado(deputado["id"])

		print(dicio["lista"])
# This script captures the ALMG state representatives currently in office
# link: http://dadosabertos.almg.gov.br/ws/deputados/ajuda#Lista de Deputados em Exercício
#
# What to return

import urllib
import json
from io import StringIO
from getData import getData

#***********************************************************************************************#
# Start by filtering only the representatives currently in office.
# That is, from the info[] array only the ID of each representative in office matters.
info = getData('http://dadosabertos.almg.gov.br/ws/deputados/em_exercicio?formato=json')["list"]


#***********************************************************************************************#
# With the IDs of the representatives in office, the next lines build an array with the URLs
# of ONLY the representatives with those IDs (the ones in office).
# PARTY and NAME are also collected from this list.

#url = []
filiacoes = []

for data in info:
#	url.append("http://dadosabertos.almg.gov.br/ws/deputados/" + str(data["id"]) + "?formato=json")
	j = {}
	j['name'] = str(data['partido'])
#	del data['partido']
import urllib
import json
from io import StringIO
from getData import getData

info = getData('http://dadosabertos.almg.gov.br/ws/proposicoes/pesquisa/direcionada?tipo=PL&sitTrami=0&formato=json')["resultado"]

for data in info["listaItem"]:
	print("-------------------------------------------")
	print("Nome: ", data["proposicao"])
	print("Autor: ", data["autor"])

	print("")

	if "assuntoGeral" in data:
		print("Assunto Geral: ", data["assuntoGeral"])
	else:
		print("Assunto Geral: null")

	print("")

	if "indexacao" in data:
		print("Indexacao: ", data["indexacao"])
	else:
		print("Indexacao: null")

	print("-------------------------------------------")
Example #40
from getData import getData

title = input("Enter a movie title ")

title = title.replace(' ', '%20')

request = getData()
rawData = request.getJSON("http://www.omdbapi.com/?t=" + title)
data = request.toJSON(rawData)

keys = ["Released", "Runtime", "Genre", "Actors", "Director", "Writer", "Plot"]

for key in keys:
	print(data[key])

input("Completed..")
Example #41
import sys
import os

from getEpic import ConvertToHtml, ParseEpicFromHtml
from getData import getData

if __name__ == "__main__":
	fileName = sys.argv[1]
	print("Converting PDF to html")
	html = ConvertToHtml(fileName)
	print("Parsing Epic No from html")
	ParseEpicFromHtml(html)
	os.remove("output.html")
	print("Getting Data from CEO site")
	getData("data.xlsx")
Example #42
barsSinceEntry = 0
numRuns = 0
myBPV = 0
allowPyr = 0
curShares = 0
#---------------------------------------------------------------------------------
# End of Lists and Variables
#---------------------------------------------------------------------------------

#---------------------------------------------------------------------------------
# Get the raw data and its associated attributes [pointvalue,symbol,tickvalue]
# Read a csv file that has at least D,O,H,L,C - V and OpInt are optional
# Set up a portfolio of multiple markets
#---------------------------------------------------------------------------------

dataClassList = getData()
numMarkets = len(dataClassList)
portfolio = portfolioClass()

#---------------------------------------------------------------------------------
# SET COMMISSION, NUMBER OF BARS TO BACK TEST, AND RAMP UP FOR INDICATORS
#---------------------------------------------------------------------------------

commission = 100 # deducted on a round turn basis
numBarsToGoBack = 2000 # number of bars from the end of data
rampUp = 100 # need this minimum of bars to calculate indicators
sysName = 'TripleMA' #System Name here


#////////  DO NOT CHANGE BELOW /////////////////////////////////////////////////
for marketCnt in range(0,numMarkets):
def login():
    """
        Here we read an RFID card and try to log the user in.
        First we get the RFID cards from the database.
    """
    dbData = gD.getData('loggedIn, rfidNumber, customerID', 'loginInfo')

    global cardA, keypad, backData, scanPass
    while True:
        breakOut = reader(False)
        cardData = backData
        cardData = str(cardData)
        backData = ""
        if keypad.getKey() == "*" or breakOut:
            sleep(1)
            if scanPass != "":
                close(scanPass)
                scanPass = ""
            #turn off all led colors
            GPIO.output(ledRed, False)
            GPIO.output(ledGreen, False)
            GPIO.output(ledBlue, False)
            break

        else:
            rowCounter = 1
            for row in dbData:
                if cardData == row[1] and row[0] == 1:
                    # GPIO.output(ledRed, False)
                    # GPIO.output(ledGreen, True)
                    servoThread = threading.Thread(target=turnServo)
                    servoThread.start()
                    for i in range(3):
                        GPIO.output(ledGreen, False)
                        sleep(0.3)
                        GPIO.output(ledGreen, True)
                        sleep(0.3)
                    # GPIO.output(ledGreen, False)
                    # GPIO.output(ledRed, True)
                    updateInjection = ('customerID = ' + str(row[2]))
                    gD.updateData('loginInfo', 'loggedIn = false', updateInjection)  # 0 and 1?
                    sleep(1)

                elif cardData == row[1] and row[0] == 0:
                    servoThread = threading.Thread(target=turnServo)
                    servoThread.start()
                    for i in range(3):
                        GPIO.output(ledGreen, False)
                        sleep(0.3)
                        GPIO.output(ledGreen, True)
                        sleep(0.3)
                    #dataID = gD.getDataWhere('customerID', 'customerInfo', 'RFIDNumber={}'.format(row[1]))
                    updateInjection = ('customerID = ' + str(row[2]))
                    gD.updateData('loginInfo', 'loggedIn = true', updateInjection)  # 0 and 1?
                    sleep(1)

                elif cardData != row[1] and rowCounter == len(dbData):
                    # the card matched no database row
                    print("not okay")
                    GPIO.output(ledGreen, False)
                    for i in range(5):
                        GPIO.output(ledRed, True)
                        sleep(0.5)
                        GPIO.output(ledRed, False)
                        sleep(0.5)
                    sleep(1)
                    GPIO.output(ledGreen, True)
                # count rows so the final elif can tell when the card matched nothing
                rowCounter += 1
        sleep(0.1)
from getData import getData

# imports needed by this snippet
import numpy as np
from matplotlib import pyplot

pyplot.close("all")

showImage = 1
# datasetName = 'oneObj_big'
datasetName = 'multiObj_balanced'
hasLabel = True
img_size1 = 90
img_size2 = 90
batch_size = 10
maxNumObj = 7

selectedImgIdx = 0

imgBatch, nextY, coords, _ = getData(batch_size, datasetName, img_size1, img_size2, hasLabel, maxNumObj)
for i in range(batch_size):
    nextY[i] = int(nextY[i])

print('image batch dimension:')
print(np.shape(imgBatch))


thiscoord = coords[selectedImgIdx]
print(np.shape(coords))
print(np.shape(thiscoord))
print(thiscoord[:, 0])
print(thiscoord[:, 1])


print(nextY)
        if draw:
            fig = plt.figure()
            txt = fig.suptitle("-", fontsize=36, fontweight='bold')
            plt.ion()
            plt.show()
            plt.subplots_adjust(top=0.7)
            plotImgs = []

        # training
        for step in range(start_step + 1, max_iters):
            start_time = time.time()

            # get the next batch of examples
            # nextX, nextY = dataset.train.next_batch(batch_size)
            nextX, nextY, objCoords, _ = getData(batch_size, datasetName, img_size, img_size, hasLabel)
            # print nextX
            # print nextY
            # sys.exit('STOP')
            # if translateMnist:
            #     nextX, nextX_coord = convertTranslated(nextX, MNIST_SIZE, img_size)

            feed_dict = {inputs_placeholder: nextX, labels_placeholder: nextY, \
                         onehot_labels_placeholder: dense_to_one_hot(nextY), b_placeholder: b_fetched}
            fetches = [train_op, cost, reward, predicted_labels, correct_labels, glimpse_images, b, avg_b, rminusb, \
                       p_loc_orig, p_loc, mean_locs, sampled_locs, outputs[-1], lr]
            # feed them to the model
            results = sess.run(fetches, feed_dict=feed_dict)

            _, cost_fetched, reward_fetched, prediction_labels_fetched, correct_labels_fetched, f_glimpse_images_fetched, \
            b_fetched, avg_b_fetched, rminusb_fetched, p_loc_orig_fetched, p_loc_fetched, mean_locs_fetched, sampled_locs_fetched, \
Example #46
#!/usr/bin/env python
import os
import sys
import re

from getFile import getFile
from getData import getData
from toXls import toXls
from getCalc import getCalc


#    file = open(filePath,'r')

def display():
    '''Display data in a graph

        Not currently implemented
    '''
    pass

if __name__ == '__main__':
    path = getFile()
    data = getData(path)
    data = getCalc(data)
    toXls(data,path)
    display()
Example #47
	# get the prediction result
	preValue = n.activate(preData)
	# set the predicted value
	if preValue > 0:
		v = 1
	else:
		v = -1
	# save the prediction result to the database
	g.save(v, time)


if __name__ == '__main__':
	# get today's date
	time = g.todayTime()
	# fetch the latest training data and prediction input
	trainData, preData, num = g.getData()
	# build the neural network and dataset
	n = createBP(num, num + 1, 1)
	DS = getDS(num, 1, trainData)
	# train the network and make the prediction
	predict(n, DS, preData)
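
createBP and getDS are not shown above. The n.activate call suggests PyBrain; a minimal sketch under that assumption (the layout of trainData is a guess):

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet

def createBP(n_in, n_hidden, n_out):
	# feed-forward network with a single hidden layer
	return buildNetwork(n_in, n_hidden, n_out)

def getDS(n_in, n_out, trainData):
	# assumes trainData is an iterable of (features, target) pairs
	ds = SupervisedDataSet(n_in, n_out)
	for features, target in trainData:
		ds.addSample(features, target)
	return ds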