Example #1
def main():
    conn, curr = utility_functions.connect()

    events = [('ErangelSolo', 25)]
    awards = [{'First': '$5000', 'Second': '$2500', 'Third': '$1000'}]

    if len(sys.argv) > 1:
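        # '-t' runs the test suite against the open cursor; anything else aborts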
        if sys.argv[1] == '-t':
            clear()
            test.testing(curr)
        else:
            print('Arguments not recognized, terminating program.')
            return
    else:
        if not table_creation.isredundant(curr):
            table_creation.create_tables(events, awards, conn, curr)

            for event in events:
                competition.run_competition(event, conn, curr)
                game_creation.Game.team_id = 1

        display_title()

        while True:
            display_top_menu()
            if not handle_choice(conn, curr):
                conn.close()
                break
Example #2
def main(args):
    start_time = time.time()

    if args.training:
        training(args)

    if args.testing:
        testing(args)

    elapsed_min = round((time.time() - start_time) / 60, 4)
    print(f'Done! {elapsed_min} min spent')
Example #3
def main():
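    # end-to-end pipeline: build the training arrays, fit the Keras model, then evaluate on the test set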
    dataTrain, labelsTrain = train.train_imgToArray()
    cowsTrain, labelsTrain = train.train_convertToNumpy(dataTrain, labelsTrain)
    x_train, y_train = train.train_createData(cowsTrain, labelsTrain)
    model = train.kerasModel()
    model = train.training(model, x_train, y_train)

    dataTest, labelsTest = test.test_imgToArray()
    cowsTest, labelsTest = test.test_convertToNumpy(dataTest, labelsTest)
    x_test, y_test = test.test_createData(cowsTest, labelsTest)
    test.testing(model, x_test, y_test)
Example #4
def main():
    trainx, trainy, testx, testy = load_data.load_data()

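    # a small MLP: 2 inputs -> 8 tanh units -> 1 sigmoid output, trained with plain SGD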
    model = Sequential()
    model.add(Dense(8, input_dim=2, activation='tanh'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.2))
    model.fit(trainx, trainy, batch_size=4, epochs=500)

    detail_about_model(model)

    test.testing(model, testx, testy)
Example #5
def main():

    trainGenerator, valGenerator, testGenerator = create_generators()

    if args.phase == 'train':
        model, history = training(trainGenerator, valGenerator)

    elif args.phase == 'test':

        file_path = "./checkpoint"
        model = tf.keras.models.load_model(file_path)

        testing(testGenerator, model)

    else:
        print("/!\ Unknown phase : type 'train' or 'test'")
        exit(0)
Example #6
def retrain(path, feedback, label, model_path, isthatpic):
    epochs = 1
    batch_size = 1

    if isthatpic:
        model_name = "\\model_default_resnet_v1.h5"
    else:
        model_name = "\\model_default_emoticon.h5"
    date = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
    x_train, pixel = load_image(path, isthatpic)
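    # one-hot target over the 7 labels for this single feedback sample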
    y_train = [0., 0., 0., 0., 0., 0., 0.]
    y_train[labels.index(label)] = 1.
    y_train = np.asarray([y_train], dtype=np.float32)
    model = load_model(model_path + model_name)
    if isthatpic:
        if not os.path.isdir(model_path + "/pic_model"):
            os.mkdir(model_path + "/pic_model")
        record_feedback(pixel, feedback, label, date,
                        model_path + "/pic_feed.csv", isthatpic)
        ori = test.testing(model_path + model_name, model_path)
        model.fit(x_train, y_train, batch_size, epochs, verbose=0)
        new_model = "\\model_" + date
        model.save(model_path + new_model + ".h5")
        new = test.testing(model_path + new_model + ".h5", model_path)
        if new > ori:
            # promote the retrained model: archive the old default, then
            # install the new checkpoint under the default name
            os.system('move ' + model_path + model_name + " " + model_path +
                      "\\pic_model" + new_model + "_improve.h5")
            os.system('move ' + model_path + new_model + ".h5 " + model_path +
                      model_name)
        else:
            os.system('move ' + model_path + new_model + ".h5 " + model_path +
                      "\\pic_model" + new_model + ".h5")

    else:
        if not os.path.isdir(model_path + "/paint_model"):
            os.mkdir(model_path + "/paint_model")
        record_feedback(pixel, feedback, label, date,
                        model_path + "/paint_feed.csv", isthatpic)
        model.fit(x_train, y_train, batch_size, epochs, verbose=0)
        new_model = "/model_emoticon_" + date + ".h5"
        os.system('move ' + model_path + model_name + " " + model_path +
                  "/paint_model/" + new_model)
        print('move ' + model_path + model_name + " " + model_path +
              "/paint_model/" + new_model)
        model.save(model_path + model_name)
Example #7
def main():
    conn, curr = utility_functions.connect()
    if len(sys.argv) > 1:
        if sys.argv[1] == '-t':
            clear()
            test.testing(curr)
        else:
            print('Arguments not recognized, terminating program.')
            return
    else:
        competition.main_code(conn, curr, 25)
        display_title()

        while True:
            display_top_menu()
            if not handle_choice(conn, curr):
                conn.close()
                break
Example #8
def output():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect('index')
        file = request.files['file']
        # if the user does not select a file, the browser may
        # submit an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect('index')
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            fullname = os.path.join(app.config['UPLOAD_FOLDER'], filename) 
            file.save(fullname)

            Extraction.init(fullname)
            text = Main.main()
            logotext = test.testing()

            return render_template('display.html', text=text, logotext=logotext)
    else:
        return redirect('index')
Example #9
test.print_data("Poisson Noise Attack", "poisson_noise_attack.jpg",
                "watermarked_lena.jpg")

# In[16]:

Watermarking.extracted(
    image_path="poisson_noise_attack.jpg",
    extracted_watermark_path="watermark_extracted_poisson_noise.jpg")
Watermarking.psnr_cal(img1="watermark.jpg",
                      img2="watermark_extracted_poisson_noise.jpg")
print("For the Watermark image")
test.print_data("Poisson Noise Attack", "watermark.jpg",
                "watermark_extracted_poisson_noise.jpg")
test.plot_image("watermark.jpg", "watermark_extracted.jpg",
                "Poisson Noise Attack ")

# In[17]:

test.testing("watermarked_lena.jpg")

# In[18]:

test.rotate("watermarked_lena.jpg")

# In[19]:

x = cv2.imread("1.PNG")
x.shape

Example #10
# -*- coding: utf-8 -*-
"""
File:   run.py
Author: Keerthana Bhuthala
Date: 10/07/2018
Desc: Solution for Project00

"""
import train
import test
import numpy as np

# Lists for the training data and target labels produced by train.py
X = []
Y = []

training_data = np.load('data_train.npy', 'r')
testing_data = np.load('data_test.npy', 'r')

# calling the function in train.py to train the model and perform cross-validation
X, Y, knn_ = train.training(training_data)

# The X and Y lists returned from train.py are given as inputs to test.py,
# calling the function in test.py to predict class labels for the test data
test.testing(X,Y,knn_,testing_data)
Example #11
    # test on the unlabeled data
    print('start predicting on unlabeled data...')
    preprocess = Preprocess(train_x_no_label, sen_len, w2v_path=w2v_path)
    embedding = preprocess.make_embedding(load=True)
    train_nolabel_x = preprocess.sentence_word2idx()
    train_nolabel_dataset = TwitterDataset(X=train_nolabel_x, y=None)
    train_nolabel_loader = torch.utils.data.DataLoader(
        dataset=train_nolabel_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=0)
    print('load model ...')
    model = torch.load(os.path.join(model_dir, 'ckpt.model'))
    y_nolabel, y_good_indices = test.testing(batch_size,
                                             train_nolabel_loader,
                                             model,
                                             device,
                                             threshold=0.8)
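    # y_good_indices marks samples whose prediction confidence cleared the 0.8 threshold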
    total_train_num = 200000 + len(y_good_indices)
    val_num = int(0.1 * total_train_num)
    print('val_num = {num}, train_num = {val}'.format(num=val_num,
                                                      val=total_train_num -
                                                      val_num))
    print('good indices = ', len(y_good_indices))
    y_nolabel = preprocess.labels_to_tensor(y_nolabel)
    y_good_indices2 = preprocess.labels_to_tensor(y_good_indices)
    train_nolabel_x = train_nolabel_x.index_select(0, y_good_indices2)
    y_nolabel = y_nolabel.index_select(0, y_good_indices2)

    # train again with all data
    print('start processing ALL...')
Example #12
                               cnt, model_path)

        # Validation on the training batch
        validation(args, tr_batch, models, ep)

        # Validation on the validation batches
        for b in val_batch:
            validation(args, b, models, ep)

else:

    # Real MR data test (Segmentation)
    if args.data_name == 'YS':
        test_bc = Create_Batch(args.batch_size, int(args.patch_size / 2),
                               args.n_mode - 1, p_path + '/test_ys/0')
        test_batch = test_bc.db_load()
        testing(args, test_batch, models, 0)

    # MICCAI MR data test (Segmentation)
    else:
        val_path = glob(p_path + '/validation/**')
        val_batch = []
        for path in val_path:
            val_bc = Create_Batch(args.batch_size, int(args.patch_size / 2),
                                  args.n_mode - 1, path)
            val_batch.append(val_bc.db_load())
        idx = 2
        for b in val_batch:
            testing(args, b, models, idx)
            idx += 1
Example #13
set_seed(0)

test_loader = DataLoader(dataset=test_data,
                         batch_size=BATCH_SIZE,
                         shuffle=False,
                         collate_fn=MyData.collate_fn)

model = LSTM_set(input_dim, hidden_size)

model.load_state_dict(torch.load("model/set_model.pkl"))

# prefer the GPU when one is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = model.to(device)
print("use", device, "now!")

output = testing(model, test_loader, device)

output_json = {}
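# answers are keyed by 1-indexed string ids ("1" .. "1500"), one per test sample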
for i in range(1, 1501):
    output_json[str(i)] = output[i - 1]

import json
with open('answer.json', 'w') as f_obj:
    json.dump(output_json, f_obj)
Example #14
                                          collate_fn=collate_fn)

print('\nload LSTM Model ...')
#model = torch.load(os.path.join(model_dir, 'ckpt.model'))
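# rebuild the architecture, then load the checkpoint weights (remapping them from cuda:3)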
model = LSTM_Net(embedding,
                 embedding_dim=256,
                 hidden_dim=128,
                 num_layers=3,
                 dropout=0.2,
                 fix_embedding=True)
model = model.to(
    device)  # device is "cuda": the model trains on the GPU (inputs fed in must be CUDA tensors too)
model.load_state_dict(
    torch.load(os.path.join(model_dir, 'ckpt_1.model'),
               map_location={'cuda:3': 'cuda'}))
output1 = testing(batch_size, test_loader, model, device, step=1)

print('load BI-LSTM-1 Model ...')
#model = torch.load(os.path.join(model_dir, 'ckpt.model'))
model = BILSTM_Net(embedding,
                   embedding_dim=256,
                   hidden_dim=128,
                   num_layers=3,
                   dropout=0.2,
                   fix_embedding=True)
model = model.to(
    device)  # device is "cuda": the model trains on the GPU (inputs fed in must be CUDA tensors too)
model.load_state_dict(
    torch.load(os.path.join(model_dir, 'ckpt_2.model'),
               map_location={'cuda:3': 'cuda'}))
output2_1 = testing(batch_size, test_loader, model, device, step=1)
Example #15
test.plot_image(watermark_image_name, "watermark_extracted" + jpg_image_type,
                "Poisson Noise Attack ")

# Compression attack

test.compression_test(watermarked_image)
test.plot_image(watermarked_image,
                "compressed_watermarked_lena" + jpg_image_type,
                'Compression Attack')
test.print_data("Compression Attack",
                "compressed_watermarked_lena" + jpg_image_type,
                watermarked_image)

Watermarking.extracted(
    image_path="Compressed_watermarked_lena" + jpg_image_type,
    extracted_watermark_path="watermark_extracted_compression_attack" +
    jpg_image_type)
test.calculate_psnr_nc(img1=watermark_image_name,
                       img2="watermark_extracted_compression_attack" +
                       jpg_image_type)
print("For the Watermark image")
test.print_data("Compression Attack", "watermark_extracted" + jpg_image_type,
                "watermark_extracted_compression_attack" + jpg_image_type)
test.plot_image(watermark_image_name,
                "watermark_extracted_compression_attack" + jpg_image_type,
                "Compression Attack")

test.testing(watermarked_image)

test.rotate(watermarked_image)
Example #16
for i in range(len(arr)):
    k = 0
    arrcpy = copy.deepcopy(arr)
    del arrcpy[i]

    for j in range(len(arrcpy)):

        # box i is dominated if another box fully contains it (x, y, w, h order)
        if arr[i][0] > arrcpy[j][0] and arr[i][1] > arrcpy[j][1] and (
                arr[i][2] + arr[i][0]) < (arrcpy[j][2] + arrcpy[j][0]) and (
                    arr[i][3] + arr[i][1]) < (arrcpy[j][3] + arrcpy[j][1]):
            k += 1
    if k == 0:
        arrnew.append(arr[i])
print(arrnew)

k = 0
for i in range(len(arrnew)):
    x, y, w, h = arrnew[i]
    k += 1
    string = "roi" + str(k) + ".jpg"

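    # crop the ROI with a margin of w/10 on every side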
    roi = im[y - int(w / 10):y + h + int(w / 10),
             x - int(w / 10):x + w + int(w / 10)]
    cv2.imwrite(string, roi)
    image = cv2.rectangle(im, (x - int(w / 10), y - int(w / 10)),
                          (x + w + int(w / 10), y + h + int(w / 10)),
                          (120, 120, 120), 2)
    #cv2.imshow("Name", im)
    cv2.imwrite("roi.jpg", roi)
    test.testing(model)
cv2.waitKey(0)
Example #17
from utils import read_data
from test import testing
from TreeNode import TreeNode, print_tree

if __name__ == '__main__':
    #Reading in train and test data
    train = read_data('data/training.txt')
    test = read_data('data/test.txt')

    #Loading attributes
    attributes = [x for x in range(len(train[0]) - 1)]

    #Testing random importance
    tree_r = testing(1, 'random', train, test, attributes)
    testing(100, 'random', train, test, attributes)

    #Testing information gain importance
    tree_ig = testing(1, 'information_gain', train, test, attributes)
    testing(100, 'information_gain', train, test, attributes)

    #Printing trees built with random and information-gain importance, respectively
    print('TREE STRUCTURE USING RANDOM IMPORTANCE')
    print_tree(tree_r)
    print('TREE STRUCTURE USING INFORMATION GAIN IMPORTANCE')
    print_tree(tree_ig)
Example #18
import tensorflow as tf
import pandas as pd
import numpy as np
from time import time
import test

n_input = 9
train_person_id = input("Enter person's id : ")
test_image_path = input("Enter path of signature image : ")

train_path = 'data/Features/Training/training_'+train_person_id+'.csv'
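# test.testing() presumably writes the extracted signature features to the CSV read below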
test.testing(test_image_path)
test_path = 'data/TestFeatures/testcsv.csv'

def readCSV(train_path, test_path, type2=False):
    # Reading train data
    df = pd.read_csv(train_path, usecols=range(n_input))
    train_input = np.array(df.values)
    train_input = train_input.astype(np.float32, copy=False)  # Converting input to float32
    df = pd.read_csv(train_path, usecols=(n_input,))
    temp = [elem[0] for elem in df.values]
    correct = np.array(temp)
    corr_train = np.eye(2)[correct]      # Converting to one hot
    # Reading test data
    df = pd.read_csv(test_path, usecols=range(n_input))
    test_input = np.array(df.values)
    test_input = test_input.astype(np.float32, copy=False)
    if not type2:
        df = pd.read_csv(test_path, usecols=(n_input,))
        temp = [elem[0] for elem in df.values]
        correct = np.array(temp)
Example #19
from __future__ import division
from train import training
from test import testing

filtbankN = 13

codebooks = training(filtbankN)

testing(codebooks, filtbankN)
Example #20
# parameters
sen_len = 32
batch_size = 16

# predicting
preprocess = Preprocess(test_x, sen_len, w2v_path=w2v_model_filename)
embedding = preprocess.make_embedding(load=True)
test_x = preprocess.sentence_word2idx()

# to dataset
test_dataset = TwitterDataset(X=test_x, y=None)

# to dataloader
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=8)

# testing
print('\nload model ...')
model = torch.load(os.path.join(model_dir, 'last_semi_82.38.model'))
outputs = testing(batch_size, test_loader, model, device)

# save result to csv
tmp = pd.DataFrame({
    "id": [str(i) for i in range(len(test_x))],
    "label": outputs
})
print("save csv ...")
tmp.to_csv(output_filename, index=False)
print("Finish Predicting")
Example #21
import train
import test
import time

start_time = time.time()
tuple_from_learning = train.learning()
test.testing(tuple_from_learning)
print("--- %s seconds ---" % (time.time() - start_time))
Example #22

# semi-supervised
train_x_no_label = load_training_data(train_no_label)
preprocess = Preprocess(train_x_no_label, sen_len, w2v_path=w2v_path)
embedding = preprocess.make_embedding(load=True)
train_x_no_label = preprocess.sentence_word2idx()
train_dataset = TwitterDataset(X=train_x_no_label, y=None)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=False,
                                           num_workers=8)
outputs = []
for i in range(model_num):
	model = torch.load(os.path.join(model_dir, 'ckpt'+str(i+1)+'.model'))
	outputs.append(testing(batch_size, train_loader, model, device))

# soft-voting ensemble
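# average each sample's outputs across the models (vote fractions if the outputs are hard labels)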
results = []
for j in range(len(outputs[0])):
	avg = 0
	for i in range(model_num):
		avg += outputs[i][j]
	avg /= model_num
	results.append(avg)

print("loading data ...")
train_x, y = load_training_data(train_with_label)
train_x_no_label = load_training_data(train_no_label)

# hard pseudo labeling
Example #23
opt = opt()

#module 4
#Processing / Training
from training import train
x_train, x_test, model, opt = train(model, opt, x_train, x_test)

#module 4
from training import augment
x_train, y_train, x_test, y_test = augment(data_augmentation, x_train, y_train,
                                           batch_size, epochs, x_test, y_test,
                                           model)

#module 5
#OUTPUT:
# Save trained model
from output import save_cnn
model, model_path = save_cnn(model, save_dir, model_name)

from output import cnn_stats
model = cnn_stats(model, model_path, x_test, y_test)

#module 6
# load and test

from test import testing
testing(model_name, save_dir, batch_size, y_test)

#####################################
#####################################
Example #24
# Parameters
valid_img_height = config.VALID.img_height
valid_img_width = config.VALID.img_width

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode',
                        type=str,
                        default='train',
                        help='Running process')
    parser.add_argument('--img_height',
                        type=int,
                        default=1080,
                        help='Height of images')
    parser.add_argument('--img_width',
                        type=int,
                        default=1920,
                        help='Width of images')
    args = parser.parse_args()
    if args.mode == 'train':
        training()
    elif args.mode == 'valid':
        validate(valid_ldr_dir, valid_hdr_dir, valid_gen_dir, logs_valid,
                 valid_img_height, valid_img_width)
    elif args.mode == 'test':
        testing(test_ldr_dir, test_gen_dir, logs_test, args.img_height,
                args.img_width, 500)
    else:
        raise Exception("Unknown mode")