def ff_labels_softmax():
    # Load the MNIST dataset.
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
    #X_train=np.concatenate((X_train,X_val,X_test),axis=0)[:65000,:]
    #y_train=np.concatenate((y_train,y_val,y_test),axis=0)[:65000]
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')
    print("Building model and compiling functions...")
    network = build_cnn(input_var)
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    ff_fn = theano.function([input_var], test_prediction)  # feed-forward function
    # s[n, :] holds sample indices sorted by decreasing probability of class n
    # (used only by the disabled balanced-assignment block below).
    s = np.empty([10, 50000])
    avg = np.empty([10])  # per-class mean of the top-5000 probabilities
    no_of_clusters = 10

    # Restore pretrained weights saved with np.savez.
    with np.load('model_dropout_test.npz') as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    lasagne.layers.set_all_param_values(network, param_values)

    # Forward-pass the full training set through the pretrained network;
    # test_prediction now holds the (50000, 10) class probabilities.
    test_prediction = ff_fn(X_train)

    # Hard label per sample: the class with the highest predicted probability.
    ff_output = np.argmax(test_prediction, axis=1).astype(float)
    # Disabled alternative: balanced cluster assignment. Repeatedly pick the
    # class with the highest mean top-5000 probability, label its top 5000
    # samples with that class, then zero the column so it is not picked again.
    '''
    for i in range(10):
        for n in range(no_of_clusters):
            s[n, :] = np.argsort(test_prediction[:, n])[::-1]
            avg[n] = np.mean(test_prediction[np.asarray(s[n, 0:5000], int), n])
        arg = np.argmax(avg)
        print(arg)
        for j in range(5000):
            ff_output[s[arg, j]] = arg
        test_prediction[:, arg] = 0  # equivalent to deleting that class's probabilities
    '''
    return ff_output
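
# Minimal self-contained illustration (an addition, not from the original)
# of the labeling step above: taking the argmax over each row of a
# softmax-style output matrix gives one hard label per sample.
import numpy as np

toy_probs = np.array([[0.1, 0.7, 0.2],
                      [0.8, 0.1, 0.1]])
print(np.argmax(toy_probs, axis=1))  # -> [1 0]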
Example #2
import sys

import numpy as np
import theano
import lasagne
from lasagne.nonlinearities import softmax

import mytools
import networks
import main_methods
import load_mnist

theano.config.optimizer = "None"

params = mytools.get_CLI_parameters(sys.argv)

params["model"] = "mlp"
params["data"] = "mnist"
params["n_classes"] = 10
params["network_input_shape"] = (-1, 28 * 28)
params["epochs"] = 3
heatmap_shape = (28, 28)

X_train, y_train, X_val, y_val, X_test, y_test = load_mnist.load_dataset()
mean = np.mean(X_train)
std = np.std(X_train)
X_train = (X_train - mean) / std
X_val = (X_val - mean) / std
X_test = (X_test - mean) / std
data = (X_train, y_train,
        X_val, y_val,
        X_test, y_test)
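
# Sanity check (illustrative addition, not in the original script): after
# standardizing with training-set statistics, X_train should now be
# approximately zero-mean with unit standard deviation.
print("normalized train mean/std:", X_train.mean(), X_train.std())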

# MLP TRAINING
mlp, mlp_params = main_methods.train_network(data, params)
mlp_prediction_func = mlp_params["prediction_func"]
# Strip the final softmax so that output_var_mlp yields raw pre-softmax scores.
if hasattr(mlp, "nonlinearity") and mlp.nonlinearity == softmax:
    mlp.nonlinearity = lambda x: x
output_var_mlp = lasagne.layers.get_output(mlp)
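
# Toy demonstration (an illustrative addition) of the softmax-stripping
# trick above: once the output nonlinearity is replaced by the identity,
# get_output returns raw pre-softmax scores instead of probabilities.
_l_in = lasagne.layers.InputLayer((None, 4))
_l_out = lasagne.layers.DenseLayer(_l_in, num_units=3, nonlinearity=softmax)
_f_prob = theano.function([_l_in.input_var],
                          lasagne.layers.get_output(_l_out))
_l_out.nonlinearity = lambda z: z
_f_score = theano.function([_l_in.input_var],
                           lasagne.layers.get_output(_l_out))
_x = np.random.randn(2, 4).astype(theano.config.floatX)
print(_f_prob(_x).sum(axis=1))  # each row sums to 1
print(_f_score(_x))             # unnormalized scores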
Example #3
import numpy as np
import theano
import theano.tensor as T
import lasagne

#profmode = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
#profmode1 = theano.ProfileMode(optimizer='fast_run', linker=theano.gof.OpWiseCLinker())
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batchsize]
        else:
            excerpt = slice(start_idx, start_idx + batchsize)
        yield inputs[excerpt], targets[excerpt]
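
# Quick self-contained check of the helper (an illustrative addition): with
# 10 samples and a batchsize of 4, the generator yields two full batches
# and silently drops the trailing remainder of 2 samples.
_xs = np.arange(10).reshape(10, 1)
_ys = np.arange(10)
print([len(t) for _, t in iterate_minibatches(_xs, _ys, 4)])  # [4, 4]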


# Load the MNIST dataset.
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
#X_train=np.concatenate((X_train,X_val,X_test),axis=0)[:65000,:]
#y_train=np.concatenate((y_train,y_val,y_test),axis=0)[:65000]
input_var = T.tensor4('inputs')
target_var = T.fmatrix('targets')
print("Building model and compiling functions...")

network = build_cnn(input_var)
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.binary_crossentropy(prediction, target_var)

# Regularizer: normalize the batch-summed predictions into a distribution
# pred, then add lambd * mean(pred * log(pred)) (a negative-entropy term)
# so that minimizing the loss favors balanced use of all output classes.
lambd = 10
pred = prediction.sum(axis=0)
pred = pred / pred.sum()
pred1 = pred * T.log(pred)
loss = loss.mean() + lambd * pred1.mean()
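
# Numeric intuition (an illustrative addition, not part of the training
# code): pred * log(pred) averaged over classes is most negative when the
# batch-average prediction is uniform, so the lambd term rewards balanced
# assignments across the 10 outputs.
_uniform = np.full(10, 0.1)
_peaked = np.array([0.91] + [0.01] * 9)
print((_uniform * np.log(_uniform)).mean())  # about -0.2303
print((_peaked * np.log(_peaked)).mean())    # about -0.0500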
Example #4
def main():
    trainx, trainy, testx, testy, valx, valy = load_dataset()
    print(np.shape(trainx))