Esempio n. 1
0
def BuildFeatureVectorForTweet(tweet):
    """Build the flat feature vector for a single tweet.

    Runs pre.PreProcessing on the tweet, then appends each feature
    group in turn: emoticons, char/word n-grams, repeated words,
    punctuation counts, upper-case words, intensifiers, negations,
    and lexicon scores.
    """
    global char_n_grams_index, word_n_grams_index

    (happy, sad, anger, fear, surprise, disgust, hashtags, usernames,
     urls, punctuations_marks_count, repetitive_words, char_n_grams,
     word_n_grams, upper_case_words, intensifiers, negations,
     score_vector) = pre.PreProcessing(tweet)

    # Each Add* helper returns the vector with its feature group appended.
    vector = AddEmoticonFeatures([], happy, sad, disgust, anger, fear,
                                 surprise)
    vector = AddCharNGramFeatures(vector, char_n_grams_index, char_n_grams)
    vector = AddWordNGramFeatures(vector, word_n_grams_index, word_n_grams)
    vector = AddRepetitiveWordsFeature(vector, repetitive_words)
    vector = AddPunctuationMarksFeature(vector, punctuations_marks_count)
    vector = AddUpperCaseWordsFeature(vector, upper_case_words)
    vector = AddIntensifersFeature(vector, intensifiers)
    vector = AddNegationsFeature(vector, negations)
    vector = AddLexiconScoreFeature(vector, score_vector)
    return vector
Esempio n. 2
0
def _main():
    """Train the dialog model on tokenized OpenSubtitles data."""
    data = datasets.open_subtitles(download=False)
    processor = preprocessing.PreProcessing()
    # s = list(data.values())[0]

    token_vectors, dictionary = load_token_vectors(processor)

    print('[train] creating network...')
    vocab_size = len(dictionary['words'])

    # Input and output share the vocabulary and the sequence length.
    dialog = models.Dialog(input_dim=vocab_size,
                           input_length=MAX_LENGTH,
                           hidden_dim=10,
                           output_length=MAX_LENGTH,
                           output_dim=vocab_size,
                           depth=4)
    model = dialog.create_model()

    print('[train] validating...')
    for vectors in token_vectors:
        print('epoch')
        # NOTE(review): unpack order (x_train, x_test, y_test, y_train)
        # is unusual — confirm it matches get_training_batch's return order.
        x_train, x_test, y_test, y_train = dialog.get_training_batch(vectors)
        dialog.train(x_train,
                     y_train,
                     batch_size=50,
                     nb_epoch=10,
                     validation_data=(x_test, y_test),
                     save_model=True)
Esempio n. 3
0
 def __init__(self, image, N, M, L, tau1, tau2, tau3, tau4, tau5, counter, f_report):
     """Set up the preprocessing, extraction and matching pipeline for one image.

     Records start timestamps for reporting, loads the reference
     signature from "signature.bin", and constructs the three
     processing stages with the given parameters.
     """
     self.start_time = time.time()
     self.start_date = datetime.now()
     self.N = N
     self.M = M
     self.L = L
     self.counter = counter
     self.image = image
     self.image_preprocessed = np.array([])
     self.sigOrig = bitarray()
     self.sigGen = bitarray()
     self.f_report = f_report
     # Fix: the original opened "signature.bin" and never closed it
     # (resource leak); a context manager closes it after reading.
     with open("signature.bin", "rb") as f:
         self.sigOrig.fromfile(f)
     self.pre_process = pre.PreProcessing(L, False)
     self.extract_process = extract.SignatureExtraction(N, M, L)
     # Only the first 238 bits of the stored signature are matched.
     self.matching_process = match.SignatureMatching(self.sigOrig[0:238], tau1, tau2, tau3, tau4, tau5)
Esempio n. 4
0
                         table_to="stats_dim")
# Load the remaining dimension and fact tables into the warehouse.
loader.insert_form_data(data=form_dim,
                        server=server,
                        database=database,
                        table_to="form_dim")
loader.insert_fact_data(data=fifa_fact,
                        server=server,
                        database=database,
                        table_to="fifa_fact")

### PRE-PROCESSING ###
# Read the fact table back out to use as the training set.
training_data = extractor.query_data(server=server,
                                     database=database,
                                     table="fifa_fact")

preprocessor = preprocessing.PreProcessing()

# 75/25 train/test split with a fixed seed for reproducibility.
X_train, x_test, Y_train, y_test = preprocessor.preprocess(
    data=training_data,
    test_size=0.25,
    train_size=0.75,
    random_state=69,
    target_variable="Value"  # Possibilities: "Value", "Wage", "Release_Clause"
)

### MODEL BUILDING ###
modeller = model.Model()

gbr_model, rfr_model, dtr_model = modeller.train_models(X_train,
                                                        Y_train,
                                                        n_estimators=1000,
Esempio n. 5
0
from bitarray import bitarray
import time
import multiprocessing
import sys
from picamera import PiCamera
from picamera.array import PiRGBArray
from time import sleep
import gc

# Enable OpenCV's optimized code paths and explicit garbage collection.
cv2.setUseOptimized(True)
gc.enable()
#Define objects
sigOrig = bitarray()
# NOTE(review): "signature.bin" is opened here but never closed in the
# visible part of this script — confirm it is closed further down.
f = open("signature.bin", "rb")
sigOrig.fromfile(f)
pre_process = pre.PreProcessing(128, False)
extract_process = extract.SignatureExtraction(8, 4, 128)
# Match against only the first 238 bits of the stored signature.
matching_process = match.SignatureMatching(sigOrig[0:238], 24, 38, 4, 28, 22)

camera = PiCamera()
camera.resolution = (544, 400)
#camera.framerate = 30

##camera.led = False
rawCapture = PiRGBArray(camera, size=(544, 400))

counter = 0
true_counter = 0
# Start the preview and give the sensor time to settle before capturing.
camera.start_preview()
time.sleep(5)
f_report = open("Quality_Reports_Image/new_timing7.txt", "w")
Esempio n. 6
0
 def run_preprocessing(self, logger, io_config):
     """Build a PreProcessing stage and run its user-table step."""
     preprocessing_step = prep.PreProcessing(logger, io_config)
     preprocessing_step.process_user_table()
Esempio n. 7
0
import sys

pre.cv2.setUseOptimized(True)

# Capture from the default camera (device 0) at 1024x768.
cap = cv2.VideoCapture(0)
cap.set(3, 1024)  # property 3: frame width
cap.set(4, 768)   # property 4: frame height
counter = 0
# Process up to 60 frames; each imshow blocks until a key is pressed.
while (counter < 60):
    ret, image = cap.read()

    cv2.imshow("deneme", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    pre_process = pre.PreProcessing(image, 128, False)

    # Find the document contour, then try to warp it to a frontal view.
    points = pre_process.get_contour(3)

    check = pre_process.get_perspective(points)
    if not check:
        print "ERROR:Contour not detected"
    else:
        # Perspective correction succeeded: show the scaled result,
        # then take the cropped region for further processing.
        image2 = pre_process.get_scaled()

        cv2.imshow("scaled", image2)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        image3 = pre_process.get_cropped()