Example no. 1
def tune_submissionfile_cutoff(path_to_file, name, submissionnumber=1):

    # load the stored predictions
    df_filenames, df_data = load_data(path_to_file)

    # sort the rows into test-set order
    df_data = sort_dataframe(df_data, df_filenames)

    # create a new submission file; clean=False keeps the raw probabilities
    Output.to_outputfile(df_data, submissionnumber, name, clean=False)
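The helpers load_data and sort_dataframe are defined elsewhere in the project. As a rough illustration only, sort_dataframe could reorder the prediction rows to the test-set file order like this (the indexing scheme is an assumption, not the project's actual code):

import pandas as pd

def sort_dataframe(df_data, df_filenames):
    # hypothetical sketch: assumes df_data is indexed by filename and
    # df_filenames lists the filenames in the required test-set order
    return df_data.reindex(df_filenames.iloc[:, 0].values)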
Example no. 2
def tune_submissionfile(path_to_file, scale_parameter, name, submissionnumber=1):

    # load the stored predictions
    df_filenames, df_data = load_data(path_to_file)

    # rescale the predicted probabilities
    df_data = tune_probabilities(df_data, scale_parameter)

    # sort the rows into test-set order
    df_data = sort_dataframe(df_data, df_filenames)

    # create a new submission file
    Output.to_outputfile(df_data, submissionnumber, name)
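tune_probabilities is also defined elsewhere. A common way to "tune" submission probabilities for a log-loss metric is to blend them with the uniform distribution, which caps overconfident predictions; a minimal sketch under that assumption, not the project's real code:

def tune_probabilities(df_data, scale_parameter):
    # hypothetical sketch: shrink each row of class probabilities toward
    # uniform; scale_parameter = 1 leaves them unchanged, 0 makes them uniform
    n_classes = df_data.shape[1]
    blended = scale_parameter * df_data + (1 - scale_parameter) / n_classes
    # renormalise so every row still sums to 1
    return blended.div(blended.sum(axis=1), axis=0)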
Example no. 3
    def add_tap(self, n):
        # accept either a single tap index or a list of them
        if isinstance(n, int):
            n = [n]
        assert isinstance(n, list)
        for one_tap in n:
            self.taps.append(one_tap)
            this_name = "tap_" + ("%06d" % one_tap)
            self.add_output(Output(name=this_name,
                                   width=self.children[0].width,
                                   expression="",
                                   central_name_gen=self.central_name_gen))
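A usage sketch (the shift_reg object name is made up for illustration): add_tap accepts either a single tap index or a list of them.

# hypothetical usage
shift_reg.add_tap(3)           # adds output "tap_000003"
shift_reg.add_tap([0, 7, 15])  # adds tap_000000, tap_000007 and tap_000015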
Example no. 4
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        from copy import deepcopy
        new_kwargs = deepcopy(kwargs)
        del new_kwargs["name"]
        if "expression" in new_kwargs:
            del new_kwargs["expression"]
        if "central_name_gen" in new_kwargs:
            # share the original name generator rather than its deep copy
            new_kwargs["central_name_gen"] = kwargs["central_name_gen"]
        self.taps = []
        self.add_input(Input(name="clk",
                             width=1,
                             expression="",
                             **new_kwargs))

        # one control input per character: r -> reset, s -> set, t -> toggle
        self.input_list = kwargs["input_list"]
        prefixes = {"r": "rst_", "s": "set_", "t": "tog_"}
        counters = {"r": 0, "s": 0, "t": 0}
        self.input_name_list = []
        for one_char in kwargs["input_list"]:
            key = one_char.lower()
            if key in prefixes:
                one_name = prefixes[key] + ("%04d" % counters[key])
                one_sig = Input(name=one_name, width=1, expression="", **new_kwargs)
                self.add_input(one_sig)
                self.add_device(one_sig)
                counters[key] += 1
                self.input_name_list.append(one_name)

        out_sig = Signal(name="q_sig", width=1, expression="", **new_kwargs)
        self.signal_list.append(out_sig)
        self.add_output(Output(name="q", width=1, expression="", **new_kwargs))
Example no. 5
    def compress(self, in_file_name, out_file_name=''):
        # derive the output filename from the input if none was given;
        # rsplit keeps any dots earlier in the path intact
        if out_file_name == '':
            out_file_name = in_file_name.rsplit('.', 1)[0] + ".bin"

        print('Compressing "%s" -> "%s"' % (in_file_name, out_file_name))
        image = Image.open(in_file_name)
        print('Image dimensions: %d x %d px' % (image.width, image.height))
        size_raw = raw_size(image.width, image.height)
        print('RAW size: %d bytes' % size_raw)

        counts = count_symbols(image)
        tree = build_tree(counts)
        codes = assign_binary_patterns(tree)
        size_compressed = compressed_size(counts, codes)
        print('Compressed size: %d bytes' % size_compressed)

        print('Writing...')
        stream = Output(out_file_name)
        encoder = Encoder(stream)

        # encode image dimensions
        encoder.encode_header(image)
        stream.flush()
        size_header = stream.bytes_written
        print('* Header: %d bytes' % size_header)

        # encode Huffman table
        encoder.encode_tree(tree)
        stream.flush()
        size_tree = stream.bytes_written - size_header
        print('* Tree: %d bytes' % size_tree)

        # encode image pixel data
        encoder.encode_pixels(image, codes)
        stream.close()
        size_pixels = stream.bytes_written - size_tree - size_header
        print('* Pixels: %d bytes' % size_pixels)

        size_wrote = stream.bytes_written
        print('Compressed %d bytes.' % size_wrote)
        # float division so the percentage is also correct under Python 2
        space_saving = 100 * (1 - size_wrote / float(size_raw))
        print('Memory reduced by %0.2f%%.' % space_saving)
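The helpers raw_size, count_symbols, build_tree, assign_binary_patterns and compressed_size come from the surrounding module and are not shown. For orientation, raw_size is presumably the trivial one; a sketch assuming one byte per pixel (8-bit grayscale):

def raw_size(width, height):
    # hypothetical: uncompressed size of an 8-bit grayscale image in bytes
    return width * height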
Example no. 6
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        from copy import deepcopy
        new_kwargs = deepcopy(kwargs)
        del new_kwargs["name"]
        del new_kwargs["width"]
        if "expression" in new_kwargs:
            del new_kwargs["expression"]
        if "central_name_gen" in new_kwargs:
            # share the original name generator rather than its deep copy
            new_kwargs["central_name_gen"] = kwargs["central_name_gen"]
        self.children.append(BasicDelay(**kwargs))
        self.taps = []
        self.add_input(Input(name="clk",
                             width=1,
                             expression="",
                             **new_kwargs))
        self.add_input(Input(name="rst", width=1, expression="", **new_kwargs))
        self.add_input(Input(name="d_in", width=self.children[0].width, expression="", **new_kwargs))
        self.add_output(Output(name="d_out", width=self.children[0].width, expression="", **new_kwargs))
Example no. 7
# Load validation data (used here as the test set)
df_validationset_caf = Input.load_validationset_caffefeatures()

print("--- load data: %s seconds ---" % round((time.time() - start_time),2))
start_time = time.time()

x_train = df_trainset_caf
y_train = df_trainset_lab
x_test = df_validationset_caf

# Train model
rf = RandomForestClassifier(n_estimators=500)
rf.fit(x_train, y_train)

print("--- train model: %s seconds ---" % round((time.time() - start_time),2))
start_time = time.time()

# Predict
preds = rf.predict_proba(x_test)
predsdf = pd.DataFrame(preds)

print("--- prediction: %s seconds ---" % round((time.time() - start_time),2))
start_time = time.time()

# Create output file
Output.to_outputfile(predsdf, 1, 'RF')

print("--- generate output: %s seconds ---" % round(time.time() - start_time, 2))

print("\n\n ... Done")
x_testdata = pd.read_csv('HOG_features_test_8_16_1.csv', sep=',',
                         header=None).values

# load training labels
y_traindata = np.asarray(Input.load_traindata_labels())

print('training classifier')
# Train classifier (x_traindata, the HOG training features, is loaded
# earlier in the full script; only the test-feature load is shown above)
clf = OneVsRestClassifier(SVC(kernel='poly', probability=True))
clf.fit(x_traindata, y_traindata)

# now you can save it to a file
with open('classifierpolytraindata_HOG_8_16_1.pkl', 'wb') as f:
    pickle.dump(clf, f)

## and later you can load it
with open('classifierpolytraindata_HOG_8_16_1.pkl', 'rb') as f:
    clf = pickle.load(f)

# Make predictions
preds = clf.predict_proba(x_testdata)
predsdf = pd.DataFrame(preds)
# save predictions, usually as a .pkl
predsdf.to_pickle('predictions_testset_HOG_8_16_1_poly_SVC.pkl')

predsdf = pd.read_pickle('predictions_testset_HOG_8_16_1_poly_SVC.pkl')
# Write output file
Output.to_outputfile(predsdf, 1, 'polySVC_traindata_HOG_8_16_1_SVC')
# -*- coding: utf-8 -*-
"""
Created on Sat May 28 09:12:26 2016

@author: roosv_000
"""

from IO import Output
import pandas as pd
from evaluation import logloss
predsdf = pd.read_pickle(
    r'C:\Users\roosv_000\Desktop\predictions_valset_HOG_8_16_1_linear_SVC.pkl')
# Write output file
Output.to_outputfile(predsdf,
                     1,
                     'linearSVC_trainset_HOG_8_16_1_clean_17_6',
                     clean=True,
                     validation=True)

from IO import Output
import pandas as pd
from evaluation import logloss
df_filenames, df_data = logloss.load_data(
    'outputfile_20160617_1_linearSVC_trainset_HOG_8_16_1_clean_17_6.csv')
score = logloss.compute_logloss(df_filenames, df_data)
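logloss.compute_logloss comes from the project's evaluation module and its internals aren't shown here. The metric itself is the standard multi-class log loss; a self-contained sketch of the formula, assuming one-hot true labels rather than the module's actual (df_filenames, df_data) interface:

import numpy as np

def multiclass_logloss(y_true_onehot, y_prob, eps=1e-15):
    # hypothetical helper: clip probabilities away from 0 and 1, renormalise
    # each row, then average the negative log probability of the true class
    p = np.clip(y_prob, eps, 1 - eps)
    p = p / p.sum(axis=1, keepdims=True)
    return -np.mean(np.sum(y_true_onehot * np.log(p), axis=1))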
x_validationset = x_validationset.groupby(x_validationset.index).apply(transformXY)
x_testset = x_testset.groupby(x_testset.index).apply(transformXY)
x_traindata = x_traindata.groupby(x_traindata.index).apply(transformXY)

# Normalise each row to its own range; .sub/.div with axis=0 are needed so the
# row statistics broadcast down the rows instead of aligning on column labels
df = x_traindata.iloc[:, 1:]
df_norm = df.sub(df.mean(axis=1), axis=0).div(df.max(axis=1) - df.min(axis=1), axis=0)
x_traindata = df_norm
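A quick sanity check of the axis=0 broadcasting on toy data (hypothetical values):

import pandas as pd

toy = pd.DataFrame([[0.0, 5.0, 10.0], [2.0, 4.0, 6.0]])
norm = toy.sub(toy.mean(axis=1), axis=0).div(toy.max(axis=1) - toy.min(axis=1), axis=0)
# every row is now centred on 0 and spans [-0.5, 0.5]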

# Train classifier
clf = OneVsRestClassifier(SVC(C=0.1, kernel='poly', probability=True))
clf.fit(x_traindata, y_traindata)

# now you can save it to a file
with open('SKINclassifierpolytrainset_SVC_c01.pkl', 'wb') as f:
    pickle.dump(clf, f)

# and later you can load it (note: this loads a different, earlier classifier
# than the one saved above)
with open('SKINclassifierlineartraindata_onevsone_padded_SVC_rs5.pkl', 'rb') as f:
    clf = pickle.load(f)
    
# Make predictions
preds = clf.predict_proba(x_testdata)
predsdf = pd.DataFrame(preds)
predsdf.to_pickle('predictions_SKIN_poly_c01_validationset.pkl')  # save predictions as a .pkl

# Write output files: one cleaned validation-set file and one raw test-set file
Output.to_outputfile(predsdf, 1, 'SKINpoly_c01_clean_validationset', clean=True, validation=True)
Output.to_outputfile(predsdf, 1, 'SKINpoly_c01_testdata', clean=False, validation=False)