Example #1
        # Tail of save_results_to_files (the full definition is elided in this
        # excerpt): write the predicted class name alongside each image.
        class_filename = filename_root + "_class.txt"
        with open(os.path.join(class_path, class_filename), 'w') as f:
            f.write(result_class_name)


# Load model
model = tf.keras.models.load_model('bacteria_model.h5', compile=False)
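# compile=False skips restoring the saved training configuration, which would
# otherwise fail because the model was saved with a custom loss; the model is
# re-compiled below with wbce (a custom weighted loss defined elsewhere)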
losses = {"class_output": "categorical_crossentropy", "segm_output": wbce}
lossWeights = {"class_output": 1.0, "segm_output": 1.0}
model.compile(optimizer='adam',
              loss=losses,
              loss_weights=lossWeights,
              metrics=['accuracy'])

# Get training data
train_data = prepare_train_data(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS, False)
X_train = train_data['X_train']

# Get testing data
test_data = prepare_test_data(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
X_test = test_data['X_test']

# Get results
[class_predictions_train, segm_predictions_train] = model.predict(X_train,
                                                                  verbose=1)
[class_predictions_test, segm_predictions_test] = model.predict(X_test,
                                                                verbose=1)

save_results_to_files(class_predictions_train, segm_predictions_train, 'train')
save_results_to_files(class_predictions_test, segm_predictions_test, 'test')
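
The custom loss wbce referenced above is not part of this excerpt. Purely as a point of reference, a weighted binary cross-entropy for a Keras segmentation head is commonly written along these lines (the weight value and all names here are assumptions, not the original author's code):

from tensorflow.keras import backend as K

def wbce(y_true, y_pred, weight=5.0):
    # weighted binary cross-entropy: up-weight the positive (foreground)
    # pixels, which helps when segmentation masks are mostly background
    y_pred = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    loss = -(weight * y_true * K.log(y_pred) +
             (1.0 - y_true) * K.log(1.0 - y_pred))
    return K.mean(loss)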
Example #2
            sys.exit(1)

    except IndexError:
        sys.stderr.write("usage: read_tweets.py <tsvfile> <task> <training>\n")
        sys.exit(1)
    # if dataset == "emot":
    #     # emoticon dataset
    #     instances = emot_instances

    # normal dataset:
    # tweets, instances, tag_map = prepare_tweet_data(tsvfile, task)
    # load the emoticon dataset pickled by an earlier run
    emot_tweets = cPickle.load(open("tweet_emoticondata.pkl", "rb"))
    emot_instances = cPickle.load(open("instance_emoticondata.pkl", "rb"))
    tweets = emot_tweets
    instances = emot_instances
    testset_tweets, testset_instances, tag_map = prepare_test_data(testfile, task)
    # lazy cleaning of objective and neutral
    objectives = [key for key in tweets
                  if instances[key].label in ("objective", "neutral")]
    popped = 0
    tpopped = 0
    tneu = 0
    neu_count = 0
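    # note: writing str to a file opened in "wb" works under Python 2 (which
    # this script targets, per cPickle); under Python 3 open it with "w"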
    pred_file = open("task2-swatcs-A-twitter-constrained.output", "wb")
    for key in objectives:
        if instances[key].label == "neutral":
            neu_count += 1
        if task == "A":
            instances.pop(key)
            tweets.pop(key)
            popped += 1
        elif task == "B":
Example #3
import numpy as np
import scipy.io as sio
import gradient as gr
import prepare_data as pre
import sigmoid
import random
import math
import time
import csv
import datetime
import multilayer
train_small = sio.loadmat('train_small.mat')
train = sio.loadmat('train.mat')
test = sio.loadmat('test.mat')

train_small_data = train_small['train']
train_data = train['train']
test_data = test['test']

(features, labels) = pre.prepare_train_data(train_small_data)
(x_test, y_test) = pre.prepare_test_data(test_data)

if __name__ == '__main__':
    # features and labels appear to be indexed collections (e.g. one array
    # per fold or per class); index 6 selects a single training set
    ann = multilayer.NeuralNet()
    (weights_ret, bias_ret) = ann.train_multilayer_SGD(labels[6], features[6],
                                                       0.01, 500, y_test,
                                                       x_test)
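
The prepare_data module used here (and in Example #5) is not shown. Purely as an illustration of the expected interface, prepare_test_data for these .mat-loaded arrays might look like the sketch below; the assumed row layout (label in the last column) and the [0, 1] scaling are guesses, not the original module:

import numpy as np

def prepare_test_data(raw):
    # hypothetical layout: one sample per row, raw 0-255 pixel values in
    # every column except the last, which holds the integer class label
    raw = np.asarray(raw, dtype=np.float64)
    x = raw[:, :-1] / 255.0  # scale features to [0, 1]
    y = raw[:, -1].astype(int)
    return x, y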

Example #4
    parser.add_argument('--model_name_predict', type=str, default="guo.h5", help='model used to predict')

    parser.add_argument('--result_path', type=str, default="./result", help='path to save prediction results')
    parser.add_argument('--result_stats_path', type=str, default="./logs/statistic/", help='path to save result statistics')

    parser.add_argument('-t','--train_mode', type=lambda x: (str(x).lower() == 'true'), default=True, help='train the model or not')
    parser.add_argument('-i','--nEpochs', type=int, default=2, help='number of epochs to train for')
    parser.add_argument('-u','--upscale_factor', type=int, default=2, help="super resolution upscale factor")

    opt = parser.parse_args()

    if opt.train_mode:
        print('===> Loading datasets')
        train_data, train_label = prepare_train_data(opt.train_data_path, opt.upscale_factor)
        print(train_data.shape)
        print(train_label.shape)
        test_data, test_label = prepare_test_data(opt.test_data_path, opt.upscale_factor)
        print(test_data.shape)
        print(test_label.shape)
        data_all = [train_data, train_label, test_data, test_label]
        print('===> Building model')
        train(data_all, os.path.join(opt.model_path, opt.model_name_train), opt.nEpochs)
        model_name_predict = opt.model_name_train
        print('===> Testing')
        stats = predict(os.path.join(opt.model_path, model_name_predict), opt.test_data_path, opt.result_path)
    else:
        print('===> Testing')
        stats = predict(os.path.join(opt.model_path, opt.model_name_predict), opt.test_data_path, opt.result_path)
    result_stats_save(stats, opt.result_stats_path)
    print('===> Complete')
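
The earlier add_argument calls (for train_data_path, test_data_path, model_path and model_name_train) are elided from this excerpt, but given the options shown, a run might look like this (the script name is hypothetical):

    python super_resolution.py -t true -i 50 -u 2

Note the -t flag: argparse has no reliable built-in bool type (bool('false') is True), so the lambda maps the string 'true' (case-insensitively) to True and anything else to False.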
Example #5
# print "-----------------------------ERROR----------------------------"
# print J_ret
# print "-----------------------------ACCURACY----------------------------"
# print acc_ret


if __name__ == '__main__':

    train_small = sio.loadmat('train_small.mat')
    train = sio.loadmat('train.mat')
    test = sio.loadmat('test.mat')
    train_small_data = train_small['train']
    train_data = train['train']
    test_data = test['test']
    (features, labels) = pre.prepare_train_data(train_small_data)
    (x_train, y_train) = pre.prepare_test_data(train_data)
    (x_test, y_test) = pre.prepare_test_data(test_data)


    # argv[1] and argv[2] name the arrays to train on (e.g. x_train, y_train);
    # eval resolves them against the locals loaded above
    xTrain = eval(sys.argv[1])
    yTrain = eval(sys.argv[2])
    learning_rate = float(sys.argv[3])
    num_epochs = int(sys.argv[4])
    error_type = sys.argv[5]


    (weights_ret, bias_ret) = SGD(yTrain, xTrain, learning_rate, num_epochs, error_type)

    np.savetxt('weights.txt', weights_ret)
    np.savetxt('bias.txt', bias_ret)
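
With the argv layout above, an invocation looks like this (the script name and the error-type value are hypothetical; argv[1] and argv[2] must name arrays defined in the script, such as x_train and y_train):

    python train_sgd.py x_train y_train 0.01 500 mse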