Example #1
import tensorflow as tf
import numpy as np
import data_loader

#data_file_train = "/Users/peter/Documents/Work/data/drag_design/NK1_training_disguised.csv"
#data_file_test = "/Users/peter/Documents/Work/data/drag_design/NK1_test_disguised.csv"
data_file_train = "/mnt/DeepLearning4Medical/data/drag_design/NK1_training_disguised.csv"
data_file_test = "/mnt/DeepLearning4Medical/data/drag_design/NK1_test_disguised.csv"


# load the train/test sets; the third argument is the non-zeros cutoff (PARAM_NON_ZEROS_CUTOFF in the other examples)
drag_data = data_loader.read_data_sets(data_file_train, data_file_test, 500)
trX, trY, teX, teY = drag_data.train.descriptors, drag_data.train.activities, drag_data.test.descriptors, drag_data.test.activities
num_features = drag_data.train.num_features

'''

OUTPUT = "/mnt/DeepLearning4Medical/data/work_code/DNN/test/NK1_filter.csv"

output_object = open(OUTPUT, "w")

sample_count = 0

for sample in trX:
	for desc in sample:
		output_object.write(str(desc))
		output_object.write(",")
	output_object.write(str(trY[sample_count]))
	output_object.write("\r\n")
Example #2
    return model


'''
	Load Dataset
'''

# file paths in the Docker environment
data_file_train = "/mnt/DeepLearning4Medical/data/drag_design/" + PARAM_TEST_FILE_NAME + "_training_disguised.csv"
data_file_test = "/mnt/DeepLearning4Medical/data/drag_design/" + PARAM_TEST_FILE_NAME + "_test_disguised.csv"

#data_file_train = "/Users/peter/Documents/Work/data/drag_design/METAB_training_disguised.csv"
#data_file_test = "/Users/peter/Documents/Work/data/drag_design/METAB_test_disguised.csv"

# load the train/test sets with the non-zeros cutoff applied
drag_data = data_loader.read_data_sets(data_file_train, data_file_test,
                                       PARAM_NON_ZEROS_CUTOFF)
trX, trY, teX, teY = drag_data.train.descriptors, drag_data.train.activities, drag_data.test.descriptors, drag_data.test.activities
num_features = drag_data.train.num_features

#print "NUM OF FEATURE: ", num_features
'''
	Begin train
'''

# placeholders: X holds the descriptor vectors, Y the activity targets
X = tf.placeholder("float", [None, num_features])
Y = tf.placeholder("float")

w1 = init_weight([num_features, 4000], "w_hidden_1")
b1 = init_bias(4000, "bias_1")
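# A hedged sketch of the next step, which this excerpt cuts off before: wiring
# the first hidden layer from the parameters above. The ReLU choice is an
# assumption (Example #4 in this list uses ReLU for the same project).
h1 = tf.nn.relu(tf.matmul(X, w1) + b1)  # (None, num_features) -> (None, 4000)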
Example #3
import os
import numpy as np
from data_loader import read_data_sets
import convnet
from sklearn import metrics

# working directory
workingdir = os.getcwd()

# models directory
modeldir = os.path.join(workingdir, 'models/example')
my_model = os.path.join(modeldir, 'model.cpkt')

# load data
datadir = os.path.join(os.getcwd(), './data/mnist')
data_provider = read_data_sets(datadir)
x_test = data_provider.test.images
x_test = np.pad(x_test, ((0, 0), (2, 2), (2, 2), (0, 0)),
                'constant')  # pad input
y_test = np.argmax(data_provider.test.labels, 1)  # dense labels

# network definition
net = convnet.ConvNet(channels=1,
                      n_class=10,
                      is_training=False,
                      cost_name='baseline')

# classification performance
n_test = data_provider.test.images.shape[0]
batch_size = 512
predictions = np.zeros_like(y_test)
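# A hedged sketch of the batched evaluation loop the code above sets up.
# `net.predict` is an assumed helper that restores the checkpoint in
# `my_model` and returns class scores; only the batching arithmetic and the
# sklearn call are grounded in the excerpt.
for start in range(0, n_test, batch_size):
    end = min(start + batch_size, n_test)
    scores = net.predict(my_model, x_test[start:end])  # hypothetical API
    predictions[start:end] = np.argmax(scores, 1)
print('accuracy: {:.4f}'.format(metrics.accuracy_score(y_test, predictions)))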
Example #4
	pre_h4 = T.nnet.relu(T.dot(h3, w4) + b4)  # apply the bias before the nonlinearity
	h4 = drop(pre_h4, 0.1)

	pyx = T.dot(h4, wo) + bo

	return pyx


#### Loading data sets #####

data_file_train = "/mnt/DeepLearning4Medical/data/drag_design/NK1_training_disguised.csv"
data_file_test = "/mnt/DeepLearning4Medical/data/drag_design/NK1_test_disguised.csv"

# load the train/test sets from the paths above
drag_data = data_loader.read_data_sets(data_file_train, data_file_test, PARAM_NON_ZEROS_CUTOFF)
trX, trY, teX, teY = drag_data.train.descriptors, drag_data.train.activities, drag_data.test.descriptors, drag_data.test.activities
num_features = drag_data.train.num_features

############################

COST_OUTPUT = "/mnt/DeepLearning4Medical/Theano_test_output/NK1_cost_test3.txt"
R2_OUTPUT = "/mnt/DeepLearning4Medical/Theano_test_output/NK1_R2_test3.txt"
cost_object = open(COST_OUTPUT, 'w')
R2_object = open(R2_OUTPUT, 'w')

############################

X = T.fmatrix()
Y = T.fmatrix()
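# A hedged sketch of how this excerpt likely continues; none of it is shown in
# the source. It assumes the function above is named `model`, that the weights
# and biases are Theano shared variables, and that the cost is mean squared
# error (consistent with the R2 log file opened above).
import theano

pyx = model(X, w1, w2, w3, w4, wo, b1, b2, b3, b4)  # hypothetical signature
params = [w1, w2, w3, w4, wo, b1, b2, b3, b4]
cost = T.mean((pyx - Y) ** 2)
updates = [(p, p - 0.001 * g) for p, g in zip(params, T.grad(cost, params))]
train = theano.function([X, Y], cost, updates=updates, allow_input_downcast=True)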
Example #5
import tensorflow as tf
import numpy as np
import data_loader

#data_file_train = "/Users/peter/Documents/Work/data/drag_design/NK1_training_disguised.csv"
#data_file_test = "/Users/peter/Documents/Work/data/drag_design/NK1_test_disguised.csv"
data_file_train = "/mnt/DeepLearning4Medical/data/drag_design/NK1_training_disguised.csv"
data_file_test = "/mnt/DeepLearning4Medical/data/drag_design/NK1_test_disguised.csv"

# load the train/test sets; 500 is the non-zeros cutoff
drag_data = data_loader.read_data_sets(data_file_train, data_file_test, 500)
trX, trY, teX, teY = drag_data.train.descriptors, drag_data.train.activities, drag_data.test.descriptors, drag_data.test.activities
num_features = drag_data.train.num_features
'''

OUTPUT = "/mnt/DeepLearning4Medical/data/work_code/DNN/test/NK1_filter.csv"

output_object = open(OUTPUT, "w")

sample_count = 0

for sample in trX:
	for desc in sample:
		output_object.write(str(desc))
		output_object.write(",")
	output_object.write(str(trY[sample_count]))
	output_object.write("\r\n")
	sample_count+=1

output_object.close()
'''
Example #6

from data_loader import read_data_sets
from networks import capsnet, lenet, baseline

# Load data
# Experiment 1: limited amount of data. For example, set percentage_train=5 to use 5% of the balanced training data.
# Experiment 2: class imbalance. For example, set unbalance=True to reduce the digits 0 and 8 (by default) to 20% of
# their data; for other configurations, change the values in unbalance_dict={"percentage": 20, "label1": 0, "label2": 8}.
# Experiment 3: data augmentation.
# (Hedged example calls for experiments 1 and 2 are sketched after the load below.)

data_provider = read_data_sets("./data/mnist")
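# Hedged illustrations of the experiment switches described above; the keyword
# names follow the read_data_sets call in Example #7, and the values are the
# defaults mentioned in the comments.
# data_provider = read_data_sets("./data/mnist", percentage_train=5 / 100.0)  # Experiment 1
# data_provider = read_data_sets("./data/mnist", unbalance=True,
#                                unbalance_dict={"percentage": 20, "label1": 0, "label2": 8})  # Experiment 2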

print("Size of:")
print("- Training-set:\t\t{}".format(len(data_provider.train.labels)))
print("- Validation-set:\t\t{}".format(len(data_provider.validation.labels)))
print("- Test-set:\t\t{}".format(len(data_provider.test.labels)))

# Configuration experiment
model_path = "./models/mnist/capsnet/"

# optimizer parameters
name_opt = "adam"
learning_rate = 1e-3
opt_kwargs = dict(learning_rate=learning_rate)
# training parameters
batch_size = 128
n_epochs = 5

# Network definition
net = capsnet.CapsNet(n_class=10, channels=1, is_training=True)

# Training
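# A hedged sketch of how training typically proceeds from here. It assumes a
# Trainer for capsnet analogous to convnet.Trainer in the last example; the
# class and every argument name below are assumptions, not the shown API.
trainer = capsnet.Trainer(net, optimizer=name_opt, opt_kwargs=opt_kwargs)
trainer.train(data_provider, model_path, batch_size=batch_size, epochs=n_epochs)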
Example #7
if corrupt_labels:
    modeldir = os.path.join('./models/', 'noise', str(perc), strategy,
                            curriculum_type)
elif unbalance:
    modeldir = os.path.join('./models/', 'unbalance',
                            str(unbalance_dict['percentage']), strategy,
                            curriculum_type)

print(modeldir)

# load data
datadir = os.path.join(os.getcwd(), './data/mnist')  # data directory
data_provider = read_data_sets(datadir,
                               init_probs=init_probs,
                               subsets=subsets,
                               corrupt_labels=corrupt_labels,
                               unbalance=unbalance,
                               unbalance_dict=unbalance_dict,
                               percentage_train=perc / 100.0)
n_train = data_provider.train.num_examples
print('Number of training images {:d}'.format(n_train))
# more training parameters
iters_per_epoch = np.ceil(1.0 * n_train / batch_size).astype(np.int32)
decay_steps = decay_after_epoch * iters_per_epoch
opt_kwargs = dict(learning_rate=lr,
                  decay_steps=decay_steps,
                  decay_rate=decay_rate)
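# Worked example with illustrative numbers (not from the source): training on
# perc = 30 percent of the standard 55,000 MNIST training images gives
# n_train = 16500, so with batch_size = 128 (as in the final example below)
# iters_per_epoch = ceil(16500 / 128) = 129 and, with decay_after_epoch = 10,
# decay_steps = 1290.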

# definition of the network
net = ConvNet(channels=1,
              n_class=10,
Example #8
# -*- coding: utf-8 -*-
"""
@Time: 2019/5/24 13:30
@Author: liulei
@File: MnistClassifier
@desc: mnist - MnistClassifier
"""

import data_loader
import tensorflow as tf

if __name__ == '__main__':
    # load the MNIST dataset
    mnist = data_loader.read_data_sets('MNIST_data/', one_hot=True)

    # define the placeholders x and y
    x = tf.placeholder('float', [None, 784])
    y = tf.placeholder('float', [None, 10])

    # define the model parameters w and b
    w = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    # model output: softmax over the logits, shape (None, 10)
    y_ = tf.nn.softmax(tf.matmul(x, w) + b)

    # define the loss: cross-entropy
    cross_entropy = -tf.reduce_sum(y * tf.log(y_))

    # define the training step that minimizes the loss
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
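    # A minimal sketch (not in the source) of running the training step. It
    # assumes data_loader mirrors the standard TF MNIST loader's next_batch
    # API; the batch size and step count are illustrative.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(1000):
            batch_xs, batch_ys = mnist.train.next_batch(100)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})

        # accuracy on the held-out test set
        correct = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print(sess.run(accuracy,
                       feed_dict={x: mnist.test.images, y: mnist.test.labels}))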
Example #9

# optimization parameters
nepochs = 15
name_opt = 'adam'
momentum = 0.9
lr = 1e-3  # learning rate
decay_rate = 0.1  # multiply the learning rate by this factor at each decay
decay_after_epoch = 10  # decay the learning rate every 10 epochs
batch_size = 128
dropout = 0.9
cost = 'cross_entropy'  # loss to minimize

# load data
perc = 30  # percentage of training data
datadir = os.path.join(os.getcwd(), './data/mnist')  # data directory
data_provider = read_data_sets(datadir, percentage_train=perc / 100.0)
n_train = data_provider.train.num_examples
print('Number of training images {:d}'.format(n_train))
# more training parameters
iters_per_epoch = np.ceil(1.0 * n_train / batch_size).astype(np.int32)
decay_steps = decay_after_epoch * iters_per_epoch
opt_kwargs = dict(learning_rate=lr,
                  decay_steps=decay_steps,
                  decay_rate=decay_rate)

# definition of the network
net = convnet.ConvNet(channels=1, n_class=10, is_training=True, cost_name=cost)

# definition of the trainer
trainer = convnet.Trainer(net,
                          optimizer=name_opt,