# ===== Example 1 =====
# 0
            options.args.predict_label, options.args.data_method)

# Step 8: register all datasources and convert them to row(Y, X) format.
# Custom section -- adapt per project!

# Step 8.1: point at the files/folders holding the raw data (when available).
if options.args.predict_languages:
    data.file_train = options.args.data_folder + 'training/'
    # data.file_development = 'eng-trial.pickle'
    # data.file_test = 'eng-test.pickle'

# Custom: record which languages we predict for.
data.languages = options.args.predict_languages

# Load every configured split; development/test are optional.
_fmt = 'specific_age_gender'
data.train = data.load(data.file_train, format=_fmt)
if data.file_development != '':
    data.development = data.load(data.file_development, format=_fmt)
if data.file_test != '':
    data.test = data.load(data.file_test, format=_fmt)

# Step 8.2: the text preprocessing steps to apply.
textPreprocessing = ['replaceTwitterURL', 'replaceDate', 'replaceYear']

# Step 8.3: reshape into the target layout.
# -> yields X, Y plus X_train, Y_train, X_development, Y_development, X_test, Y_test.
data.transform(_type='YXrow', preprocessing=textPreprocessing)

# Step 8.4: for training purposes we can restrict the subset sizes (train_size, development_size, test_size).
# ===== Example 2 =====
# 0
# Dump the parsed CLI arguments for traceability.
printer.system(options.args_dict)

# Step 7: build the Data container from the command-line options.
data = Data(
    options.args.avoid_skewness,
    options.args.data_folder,
    options.args.predict_label,
    options.args.data_method,
)

# Step 8: register all datasources and convert them to row(Y, X) format.
# Custom section -- adapt per project!

# Step 8.1: the file holding the raw data.
data.file_train = 'impression_data.csv'

# Custom: record which languages we predict for.
data.languages = options.args.predict_languages

# Load the training file; the 'complex_file' format also yields a held-out split.
data.train, test = data.load(data.file_train, format='complex_file')
x_tester = [row[1] for row in test]  # second field of each held-out row
y_tester = [row[0] for row in test]  # first field of each held-out row

# Step 8.2: no text preprocessing for this dataset.
textPreprocessing = []

# Step 8.3: reshape into the target layout.
# -> yields X, Y plus X_train, Y_train, X_development, Y_development, X_test, Y_test.
data.transform(_type='YXrow', preprocessing=textPreprocessing)

# Step 8.4: for training purposes we can restrict the subset sizes (train_size, development_size, test_size).
# data.subset(500, 50, 50)

# Step 9: declare the features to use; this part is merely for sklearn.
features = ClassifierFeatures()
# features.add('headline', TfidfVectorizer(tokenizer=TextTokenizer.tokenizeText, lowercase=False, analyzer='word', ngram_range=(1,1), min_df=1), 'headline'),#, max_features=100000)),
# features.add('headline_words', TfidfVectorizer(tokenizer=TextTokenizer.tokenizeText, lowercase=False, analyzer='word', ngram_range=(1,1), min_df=1), 'headline'),#, max_features=100000)),
# ===== Example 3 =====
# 0
# Step 7: build the Data container from the command-line options.
data = Data(
    options.args.avoid_skewness,
    options.args.data_folder,
    options.args.predict_label,
    options.args.data_method,
)

# Step 8: register all datasources and convert them to row(Y, X) format.
# Custom section -- adapt per project!

# Step 8.1: the pickle file holding the raw data (only if available).
file_name = 'conversion_path'
# data.file_train = 'conversion_chance.pickle'
# data.file_train = 'conversion_product.pickle'
# BUG FIX: the original `file_name'.pickle'` is a SyntaxError -- implicit
# string concatenation only works between adjacent *literals*, not between a
# name and a literal. Concatenate explicitly instead.
data.file_train = file_name + '.pickle'

# Custom function

# Load the pickled rows. row[0] is used as the grouping key below
# (presumably the class label -- confirm against Data.load).
data.train = data.load(data.file_train, format='pickle')

# Count how often each row[0] value occurs ...
counter = {}
for row in data.train:
    counter[row[0]] = counter.get(row[0], 0) + 1

# ... then keep only rows whose key occurs more than 5 times,
# dropping under-represented groups before training.
data.train = [row for row in data.train if counter[row[0]] > 5]

# Step 8.2: no text preprocessing for this dataset.
textPreprocessing = []