Example #1
import time
import sys
from demesis.concave_fn import KLD
from datasets.dataRead import a9aReader
from fuel.schemes import ShuffledScheme
import numpy as np
# from utils import twin_plot
np.set_printoptions(threshold=np.nan)  # print arrays in full (np.nan is accepted as threshold on older NumPy)
# Create the objects

# Dual Updates

location = 'a9a'
if len(sys.argv) > 1:
    location = sys.argv[1]

# Data reader
a9a = a9aReader(location='./datasets/' + location + '.')
a9a.read()
input_dim = a9a.input_dim
print "The input dimension is " + str(input_dim)
batch_size = 512
train_dataset, test_dataset, p = a9a.get_split(0)
print "Number of training examples is " + str(train_dataset.num_examples)
print "Number of testing examples is " + str(test_dataset.num_examples)
'''__author__: Amartya Sanyal <*****@*****.**>'''


def get_results(C=None, pt=None):
    dual_class = KLD(C)

    # Direct Optimizer
    # model = CCmodel()
Example #2
from DAMP.FMeasure import FbetaOpt
from datasets.dataRead import a9aReader
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
from fuel.schemes import ShuffledScheme
# Create the objects

# Spade Optimizer
# model = Spade(dual_class)
# Data reader
a9a = a9aReader(location='./datasets/kdd08.')
a9a.read()
input_dim = a9a.input_dim
print "The input dimension is " + str(input_dim)
num_splits = 0
batch_size = 1024
train_dataset, test_dataset, p = a9a.get_split(0)
print "Number of training examples is " + str(train_dataset.num_examples)
print "Number of testing examples is " + str(test_dataset.num_examples)

model = FbetaOpt(p)

train_state = train_dataset.open()
test_state = test_dataset.open()

scheme = ShuffledScheme(examples=train_dataset.num_examples,
                        batch_size=batch_size)
test_scheme = ShuffledScheme(examples=test_dataset.num_examples,
                             batch_size=test_dataset.num_examples)
print "Input dim is " + str(input_dim)
Example #3
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
import sys
from fuel.schemes import ShuffledScheme
from fuel.streams import DataStream
import numpy as np
from datasets.dataRead import a9aReader
# Assumed import path for FbetaThresh, mirroring FbetaOpt in Example #2.
from DAMP.FMeasure import FbetaThresh
# Create the objects

# Spade Optimizer
# model = Spade(dual_class)
# Data reader
fName = 'a9a'
if len(sys.argv) > 1:
    fName = sys.argv[1]
a9a = a9aReader(location='./datasets/' + fName + '.')
a9a.read()
input_dim = a9a.input_dim
print "The input dimension is " + str(input_dim)
num_splits = 0
batch_size = 1024
train_dataset, test_dataset, p = a9a.get_split(0)
print "Number of training examples is " + str(train_dataset.num_examples)
print "Number of testing examples is " + str(test_dataset.num_examples)

model = FbetaThresh(p)

train_state = train_dataset.open()
test_state = test_dataset.open()

scheme = ShuffledScheme(examples=train_dataset.num_examples,