Example #1
    def build(self, config):
        """Build the recurrent convolutional net."""
        nets = OrderedDict()

        nets['t_input'] = self.tensor_in  # shape (12, -1, 512)

        nets['reshape_t_input'] = tf.reshape(
            nets['t_input'], (-1, 1, 1, 1, nets['t_input'].shape[-1]))

        nets['bar_main'] = NeuralNet(nets['reshape_t_input'],
                                     config['net_g']['bar_main'],
                                     name='bar_main')

        nets['bar_pitch_time'] = NeuralNet(nets['bar_main'].tensor_out,
                                           config['net_g']['bar_pitch_time'],
                                           name='bar_pitch_time')

        nets['bar_time_pitch'] = NeuralNet(nets['bar_main'].tensor_out,
                                           config['net_g']['bar_time_pitch'],
                                           name='bar_time_pitch')

        config_bar_merged = config['net_g']['bar_merged'].copy()

        # If the last layer's filter count is left as None, fill it in with
        # the number of tracks. The layer specs are nested tuples, so convert
        # to lists, patch the value, and convert back.
        if config_bar_merged[-1][1][0] is None:
            layer_spec = list(config_bar_merged[-1])
            layer_spec[1] = list(layer_spec[1])
            layer_spec[1][0] = config['deconv_ds']['num_track']
            layer_spec[1] = tuple(layer_spec[1])
            config_bar_merged[-1] = tuple(layer_spec)

        nets['bar_merged'] = NeuralNet(tf.concat([
            nets['bar_pitch_time'].tensor_out,
            nets['bar_time_pitch'].tensor_out
        ], -1),
                                       config_bar_merged,
                                       name='bar_merged')

        nets['t_output'] = nets['bar_merged'].tensor_out[
            ..., :config['deconv_ds']['num_pitch'], :]

        nets['reshape_t_output'] = tf.reshape(
            nets['t_output'],
            (config['deconv_ds']["batch_size"], -1,
             nets['t_output'].shape[-3] * nets['t_output'].shape[-4],
             nets['t_output'].shape[-2], nets['t_output'].shape[-1]))

        tensor_out = nets['reshape_t_output']

        return tensor_out, nets
Example #2
 def learn_test(self):
     net = NeuralNet([1, 3, 1], -1, 1)
     x = [[[-3]], [[2]], [[0]], [[-2]]]
     y = [[[1]], [[1]], [[0]], [[0]]]
     training_set = [x, y]
     # Train for 5000 iterations with learning rate 0.5 and plot the cost.
     J = net.learn(training_set, 5000, 0.5)
     plt.plot(J)
     plt.show()
     # Print the output-layer activation for each training input.
     for sample in x:
         res = net.forward_prop(sample)
         res_a = res[0]
         print(res_a[-1])
         print('-----------')
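These tests (see also Examples #12-14 and #18 below) assume a constructor that takes a layer-size list plus lower and upper bounds for uniform weight initialization, and a forward_prop that returns the per-layer activations as its first element. A minimal sketch of that assumed interface; the method names mirror the tests, but the bodies are guesses, not the original implementation:

import numpy


class Layer:
    """Hypothetical layer: one weight matrix with the bias folded in."""

    def __init__(self, n_neurons, n_inputs, low, high):
        # +1 column for the bias input
        self._matrix = numpy.random.uniform(low, high, (n_neurons, n_inputs + 1))

    def return_amount_of_neurons(self):
        return self._matrix.shape[0]

    def return_matrix(self):
        return self._matrix

    def set_matrix(self, matrix):
        self._matrix = matrix


class NeuralNet:
    """Sketch only: the interface the tests exercise, not the original code."""

    def __init__(self, design, low, high):
        self._layers = [Layer(design[i + 1], design[i], low, high)
                        for i in range(len(design) - 1)]

    def return_net(self):
        return self._layers

    def set_net(self, layers):
        self._layers = layers

    def forward_prop(self, x):
        a = numpy.asarray(x, dtype=float)
        activations = [a]
        for layer in self._layers:
            # Prepend the bias row, then apply a sigmoid layer.
            biased = numpy.vstack([numpy.ones((1, a.shape[1])), a])
            a = 1.0 / (1.0 + numpy.exp(-(layer.return_matrix() @ biased)))
            activations.append(a)
        # The tests read res[0] for activations and take the last entry.
        return activations, None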
Example #3
def test1():
    X = np.array([[1, 0, 1, 0], [1, 0, 1, 1], [0, 1, 0, 1]])
    y = np.array([[1], [1], [0]])
    nn = NeuralNet()
    nn.train(X, y, epochs=5000)
    pred = nn.predict(X)
    print(pred)
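This NeuralNet is built with no arguments and then driven through train(X, y, epochs=...) and predict. A minimal sketch of such a class, assuming one sigmoid hidden layer trained by full-batch gradient descent (the layer sizes and learning rate are guesses, not the original values):

import numpy as np


class NeuralNet:
    """Sketch only: a tiny 2-layer sigmoid net with train/predict."""

    def __init__(self, n_in=4, n_hidden=4, n_out=1, lr=0.5):
        rng = np.random.default_rng(0)
        self.W1 = rng.standard_normal((n_in, n_hidden))
        self.W2 = rng.standard_normal((n_hidden, n_out))
        self.lr = lr

    @staticmethod
    def _sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def train(self, X, y, epochs=1000):
        for _ in range(epochs):
            h = self._sigmoid(X @ self.W1)    # hidden activations
            out = self._sigmoid(h @ self.W2)  # predictions
            err = y - out                     # output error
            d_out = err * out * (1 - out)     # sigmoid gradient at the output
            d_h = (d_out @ self.W2.T) * h * (1 - h)
            self.W2 += self.lr * h.T @ d_out
            self.W1 += self.lr * X.T @ d_h

    def predict(self, X):
        return self._sigmoid(self._sigmoid(X @ self.W1) @ self.W2)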
Example #4
    def __init__(self, maxn=2):
        # Only supports 2 player
        self.maxn = maxn
        # nets maps a player count to its corresponding network
        self.nets = {}
        for i in range(2, self.maxn + 1):
            self.nets[i] = NeuralNet(layers=[9, 5, 8, 3, 1],
                                     input_layers=[0, 1],
                                     output_layers=[3, 4],
                                     wiring=[(None, None), (None, None),
                                             ([0, 1], RELU_FUN),
                                             ([2], SOFTMAX_FUN),
                                             ([2, 3], LINEAR_FUN)],
                                     learning_rate=0.00001,
                                     L2REG=0.001,
                                     build=False)

        # To prevent overfitting, share weights between the networks
        # as much as possible
        '''
        for i in xrange(3, self.maxn+1):
            assert self.nets[i]._vweights[3].get_value().shape == self.nets[2]._vweights[3].get_value().shape
            assert self.nets[i]._vbiases[3].get_value().shape == self.nets[2]._vbiases[3].get_value().shape
            assert self.nets[i]._vweights[5].get_value().shape == self.nets[2]._vweights[5].get_value().shape
            assert self.nets[i]._vbiases[5].get_value().shape == self.nets[2]._vbiases[5].get_value().shape
            self.nets[i]._vweights[3] = self.nets[2]._vweights[3]
            self.nets[i]._vbiases[3] = self.nets[2]._vbiases[3]
            self.nets[i]._vweights[5] = self.nets[2]._vweights[5]
            self.nets[i]._vbiases[5] = self.nets[2]._vbiases[5]
            self.nets[i].rebuild()
        '''

        self.nets[2].rebuild()
        self.nets[2]._vbiases[4].set_value(np.array([1.5]))
Example #5
def main():

    args = parser.parse_args()
    print('Options:')
    for (key, value) in iteritems(vars(args)):
        print("{:12}: {}".format(key, value))

    assert os.path.exists(args.xp_dir)

    # default value for basefile: string basis for all exported file names
    if args.out_name:
        base_file = "{}/{}".format(args.xp_dir, args.out_name)
    else:
        base_file = "{}/{}_{}_{}".format(args.xp_dir, args.dataset,
                                         args.solver, args.loss)

    # if pickle file already there, consider run already done
    if (os.path.exists("{}_weights.p".format(base_file))
            and os.path.exists("{}_results.p".format(base_file))):
        sys.exit()

    # computation device
    if 'gpu' in args.device:
        try:  # Theano-1.0.2
            theano.gpuarray.use(args.device)
        except:  # Theano-0.8.2
            theano.sandbox.cuda.use(args.device)

    np.random.seed(args.seed)

    # set save_at to n_epochs if not provided
    save_at = args.n_epochs if not args.save_at else args.save_at

    log_file = "{}/log_{}.txt".format(args.xp_dir, args.dataset)
    save_to = "{}_weights.p".format(base_file)
    weights = "{}/{}_weights.p".format(args.xp_dir, args.in_name) \
        if args.in_name else None

    # update config data
    Cfg.C.set_value(args.C)
    Cfg.batch_size = args.batch_size
    Cfg.compile_lwsvm = False
    Cfg.learning_rate.set_value(args.lr)
    Cfg.softmax_loss = (args.loss == 'ce')

    # train
    nnet = NeuralNet(dataset=args.dataset, use_weights=weights)
    nnet.train(solver=args.solver,
               n_epochs=args.n_epochs,
               save_at=save_at,
               save_to=save_to)

    # log
    nnet.log.save_to_file("{}_results.p".format(base_file))
    nnet.dump_weights("{}_final_weights.p".format(base_file))

    with open(log_file, "a") as logged:
        logged.write("{}\t{}\t{}: OK\n".format(args.dataset, args.solver,
                                               args.loss))
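The parser itself is defined outside the snippet. A sketch of an argparse setup consistent with the options this main() actually reads (flag names are taken from the code above; the defaults are guesses):

import argparse

# Sketch: only the flags main() above reads; defaults are assumptions.
parser = argparse.ArgumentParser()
parser.add_argument("--xp_dir", required=True, help="experiment directory")
parser.add_argument("--dataset", default="mnist")
parser.add_argument("--solver", default="adam")
parser.add_argument("--loss", default="ce")
parser.add_argument("--device", default="cpu", help="e.g. 'gpu0' to use CUDA")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--n_epochs", type=int, default=50)
parser.add_argument("--save_at", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--C", type=float, default=1e3)
parser.add_argument("--in_name", default="")
parser.add_argument("--out_name", default="")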
Example #6
def train_xor_network():
    # two training sets
    training_one = [
        Instance([0, 0], [0]),
        Instance([0, 1], [1]),
        Instance([1, 0], [1]),
        Instance([1, 1], [0])
    ]
    training_two = [
        Instance([0, 0], [0, 0]),
        Instance([0, 1], [1, 1]),
        Instance([1, 0], [1, 1]),
        Instance([1, 1], [0, 0])
    ]

    settings = {
        # Required settings
        "n_inputs": 2,  # Number of network input signals
        "n_outputs": 1,  # Number of desired outputs from the network
        "n_hidden_layers": 1,  # Number of hidden layers in the network
        "n_hiddens": 2,  # Number of nodes in each hidden layer
        "activation_functions": [
            tanh_function, sigmoid_function
        ],  # Activation functions per layer, e.g. [hidden_layer, output_layer]

        # Optional settings
        "weights_low": -0.1,  # Lower bound on initial weight range
        "weights_high": 0.1,  # Upper bound on initial weight range
        "save_trained_network": False,  # Whether to write the trained weights to disk
        "input_layer_dropout": 0.0,  # Dropout fraction of the input layer
        "hidden_layer_dropout": 0.1,  # Dropout fraction in all hidden layers
        "batch_size": 0,  # 1 := online learning, 0 := full training set per batch, else := mini-batch size
    }

    # initialize the neural network
    global network
    network = NeuralNet(settings)

    # load a stored network configuration
    # network = NeuralNet.load_from_file( "xor_trained_configuration.pkl" )

    # start training on test set one
    network.backpropagation(
        training_one,  # specify the training set
        ERROR_LIMIT=1e-6,  # define an acceptable error limit 
        learning_rate=0.03,  # learning rate
        momentum_factor=0.95  # momentum
    )

    # Test the network on the training set and print the results.
    for instance in training_one:
        print("Input: {features} -> Output: {output} \t| target: {target}".format(
            features=str(instance.features),
            output=str(network.update(np.array([instance.features]))),
            target=str(instance.targets)))

    # save the trained network
    network.save_to_file("networks/XOR_Operator/XOR_Operator.obj")
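The Instance class is defined elsewhere; these examples only ever read its features and targets attributes. A minimal sketch under that assumption:

import numpy as np


class Instance:
    """Sketch: an (input features, target outputs) training pair."""

    def __init__(self, features, targets):
        self.features = np.array(features)
        self.targets = np.array(targets)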
Example #7
    def build(self, config):
        """Build the discriminator, using the 'dis_ds' dataset settings
        from the given config."""
        nets = OrderedDict()
        config = deepcopy(config)

        nets['t_input'] = self.tensor_in
        nets['t_seqlen'] = self.tensor_len

        config['conv_ds'] = config['dis_ds']

        nets['conv'] = ConvNet(self.tensor_in, config)

        lstm_cell = tf.nn.rnn_cell.LSTMCell(config['net_d']['rnn_features'],
                                            state_is_tuple=True,
                                            name="lstm")
        cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell], state_is_tuple=True)
        init_state = cells.zero_state(config['dis_ds']['batch_size'],
                                      tf.float32)
        nets['rnn_outputs'], nets['final_state'] = tf.nn.dynamic_rnn(
            cells,
            nets['conv'].tensor_out,
            initial_state=init_state,
            sequence_length=nets['t_seqlen'])

        nets['full_connected'] = NeuralNet(nets['rnn_outputs'][:, -1, :],
                                           config['net_d']['full_connected'],
                                           name='full_connected')

        nets['t_output'] = nets['full_connected'].tensor_out

        return nets['t_output'], nets
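Note that nets['rnn_outputs'][:, -1, :] selects the final padded time step; with variable-length sequences, the last valid output per sequence may be what is wanted. A sketch of that alternative, assuming the names from the example above and TensorFlow 1.x ops:

# Gather each sequence's last valid RNN output instead of the padded final
# step (a sketch; assumes the nets dict above and that t_seqlen holds the
# true sequence lengths).
last_idx = tf.maximum(tf.cast(nets['t_seqlen'], tf.int32) - 1, 0)
batch_range = tf.range(tf.shape(nets['rnn_outputs'])[0])
last_valid = tf.gather_nd(nets['rnn_outputs'],
                          tf.stack([batch_range, last_idx], axis=1))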
Example #8
 def __init__(self):
     '''
     A container of NeuralEditElements, the GUI components representing a neural net
     '''
     self.Net = NeuralNet()
     self.NetPath = None  # set during pickle op
     self.Elements = []   # elements are UI representation of individual neurons
     self.LookupTable = {}
Example #9
    def test_predict(self):
        neural_net = NeuralNet(
            input_size=3,
            hidden_size=3,
            output_size=1,
        )

        result = neural_net.predict([1, 1, 1])
        self.assertEqual(result, 6)
Example #10
 def __init__(self, name: str, position: list, surround=None):
     self._position = position
     self.name = name
     self.color = [rand_unif(0, 1),
                   rand_unif(0, 1),
                   rand_unif(0, 1)]
     self.field_of_vision = surround  # sense surroundings
     self._reserves = 0
     self.nn = NeuralNet(8, 4, 2)
     self.genome = []
Example #11
def test2():
    X1 = np.array([[0, 0],
                   [0, 1],
                   [1, 0],
                   [1, 1]])
    y1 = np.array([0, 1, 1, 0])
    nn = NeuralNet(input_layer=2, hidden_layer=4, output_layer=1)
    nn.train(X1, y1)
    pred = nn.predict(X1)
    print(pred)
Example #12
 def calculate_gradient_test(self):
     net = NeuralNet([1, 3, 1], -1, 1)
     net_layers = net.return_net()
     net_layers[0].set_matrix(numpy.array([[1, 1], [2, 2], [3, 3]]))
     net_layers[1].set_matrix(numpy.array([1, 2, 3, 4]))
     net_layers[1].set_matrix(
         numpy.reshape(net_layers[1].return_matrix(), (1, 4)))
     net.set_net(net_layers)
     x = numpy.array([[-3]])
     res = net.calculate_gradients([[x], [1]])
Example #13
 def net_create_test(self):
     net = NeuralNet([1, 3, 1], -1, 1)
     net_layers = net.return_net()
     design = [1, 3, 1]
     for i in range(len(net_layers)):
         with self.subTest(i=i):
             layer = net_layers[i]
             self.assertEqual(layer.return_amount_of_neurons(), design[i], i)
Example #14
 def back_prop_test(self):
     net = NeuralNet([1, 3, 1], -1, 1)
     net_layers = net.return_net()
     net_layers[0].set_matrix(numpy.array([[1, 1], [2, 2], [3, 3]]))
     net_layers[1].set_matrix(numpy.array([1, 2, 3, 4]))
     net_layers[1].set_matrix(
         numpy.reshape(net_layers[1].return_matrix(), (1, 4)))
     net.set_net(net_layers)
     x = numpy.array([[-3]])
     forward_res = net.forward_prop(x)
     res = net.back_prop(1, forward_res)
Example #15
def main():
    X, Y = create2DData()
    plot2DData(X, Y)
    noOfLayers = 2  # Hidden and Output layer (Excluding the input layer)
    layerDimensions = [2, 3, 1]  # No of units in Input, Hidden, Output layer
    noOfIterations = 6000
    learningRate = 0.6
    N = NeuralNet(noOfLayers, layerDimensions)  # Create a NeuralNet instance
    AL, WL, bL = N.gradientDescent(X,
                                   noOfIterations,
                                   learningRate,
                                   Y,
                                   printCost=True)
Example #16
def main():

    args = parser.parse_args()
    print('Options:')
    for (key, value) in iteritems(vars(args)):
        print("{:12}: {}".format(key, value))

    assert os.path.exists(args.xp_dir)

    Cfg.C.set_value(args.C)
    Cfg.D.set_value(args.D)
    Cfg.batch_size = args.batch_size
    Cfg.compile_lwsvm = True
    Cfg.softmax_loss = False

    # default value for basefile: string basis for all exported file names
    if args.out_name:
        base_file = "{}/{}".format(args.xp_dir, args.out_name)
    else:
        base_file = "{}/{}_lwsvm".format(args.xp_dir, args.in_name)

    # if pickle file already there, consider run already done
    if (os.path.exists("{}_final_weights.p".format(base_file))
            and os.path.exists("{}_results.p".format(base_file))):
        sys.exit()

    # computation device
    if 'gpu' in args.device:
        try:  # Theano-1.0.2
            theano.gpuarray.use(args.device)
        except:  # Theano-0.8.2
            theano.sandbox.cuda.use(args.device)

    np.random.seed(args.seed)

    log_file = "{}/log_{}.txt".format(args.xp_dir, args.dataset)
    if args.dataset != 'imagenet':
        weights = "{}/{}_weights.p".format(args.xp_dir, args.in_name)
    else:
        weights = "{}/vgg16.pkl".format(args.xp_dir)

    nnet = NeuralNet(dataset=args.dataset, use_weights=weights)
    nnet.train(solver="svm")

    # log
    nnet.log.save_to_file("{}_results.p".format(base_file))
    nnet.dump_weights("{}_final_weights.p".format(base_file))

    with open(log_file, "a") as logged:
        logged.write("{}\t{}\tlwsvm: OK\n".format(args.dataset, args.in_name))
Example #17
def main():
    X, Y = generateOneDData()
    # plotOneDData(X, Y)
    noOfLayers = 2  # Hidden and Output layer (Excluding the input layer)
    layerDimensions = [1, 2, 1]  # No of units in Input, Hidden, Output layer
    noOfIterations = 5000
    learningRate = 0.6
    N = NeuralNet(noOfLayers, layerDimensions)  # Create a NeuralNet instance
    AL, WL, bL = N.gradientDescent(X,
                                   noOfIterations,
                                   learningRate,
                                   Y,
                                   printCost=True)
    plotTransformedData(AL, WL, bL, Y)
Example #18
 def forward_prop_test(self):
     net = NeuralNet([3, 3, 3], -1, 1)
     net_layers = net.return_net()
     net_layers[0].set_matrix(
         numpy.array([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]))
     net_layers[1].set_matrix(
         numpy.array([[-1, 1, -1, 1], [-2, 2, -2, 2], [3, -3, 3, -3]]))
     net.set_net(net_layers)
     x = numpy.array([[0.5], [-0.5], [-0.7]])
     res = net.forward_prop(x)
     res_a = res[0]
     a = res_a[len(res_a) - 1]
     # a = a[1:]
     expected_res = numpy.array([[0.41089559], [0.3272766], [0.746644]])
     # assertEqual(a.all(), expected_res.all()) would only compare two
     # booleans; compare the arrays elementwise instead.
     numpy.testing.assert_allclose(a, expected_res, rtol=1e-5)
Example #19
    def __init__(self):
        self.brain = NeuralNet(settings.NUM_INPUTS, settings.NUM_OUTPUTS,
                               settings.NUM_HIDDEN,
                               settings.NEURONS_PER_HIDDEN)

        self.position = Vector2D(random() * settings.WINDOW_WIDTH,
                                 random() * settings.WINDOW_HEIGHT)

        self.look_at = Vector2D()
        self.rotation = random() * 2 * math.pi
        self.ltrack = 0.16
        self.rtrack = 0.16
        self.fitness = 0.0
        self.scale = settings.SWEEPER_SCALE
        self.closest_mine = 0
        self.speed = 0.0
Example #20
def label_data():
    p = excel_reader.get_data(DATA_FROM, DATA_TO, r'D:\python\projdata\data\1m.xlsx')
    log_price = np.log(p)
    #plt.plot(p)
    topology = [14, 100, 100, 50, 20, 2]
    nn = NeuralNet(topology)
    #nn = nn_factory.read('net_11_7d')
    #index = comp_index_matrix(p)
    #b, s = comp_b_s(nn, p, index)
    #plt.plot(index[0,:])
    #comp_loss(b, s, index)
    #plt.plot(p / 4000 - 1)
    #db, ds = grad_b_s(b, s, index)
    #plt.plot(ds * 10)
    #plt.show()
    lb, ls = gradient_descent(nn, p, log_price, STEPS, LEARNING_RATE)
    plt.plot(lb)
    plt.show()
    nn.save('net_final')
Example #21
def rundeltamultibatch(n_nodes_v, epochs):
    start = time.time()
    # Create weight matrices
    net = NeuralNet()
    n_layers = len(n_nodes_v) - 1
    print("Layers = ", n_layers)
    for l in range(n_layers + 1):
        net.add_layer(n_nodes_v[l])

    print(net)
    net.generate_weights()
    err = []
    val = []
    x = []
    net.set_inputs(0, shuffledx)
    net.set_y(0, shuffledx)
    net.forward_pass()
    net.backward_pass(T)
    step_v = np.vectorize(step)  # note: currently unused
    first_classification = T - (np.ceil(net.get_outputs()) * 2 - 1)
    # print(net.get_outputs())
    for epoch in range(epochs):
        net.forward_pass()
        net.backward_pass(T)
        net.update()
        # print(np.mean(np.square(net.get_error())))
        x.append(epoch)
        err.append(np.mean(np.square(net.get_error())))
        val.append(np.count_nonzero(T - (np.ceil(net.get_outputs()) * 2 - 1)))

    # Debugging output:
    # print(first_classification)
    # print(T - (np.ceil(net.get_outputs()) * 2 - 1))
    # print(T)
    # print(net.get_outputs())
    # print(np.ceil(net.get_outputs()) * 2 - 1)
    print("Time elapsed: ", round(time.time() - start, 3), " seconds")
    plot_err_val(x, err, val)
    # plotpoints(shuffledx)
    show_plots()
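step is vectorized above but never defined in the snippet. Given targets in {-1, +1} (cf. np.ceil(outputs) * 2 - 1), a plausible definition, offered as an assumption rather than the original, is a sign threshold:

def step(value, threshold=0.0):
    # Hypothetical threshold function mapping an activation to a +/-1 label.
    return 1 if value >= threshold else -1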
Example #22
def evaluate(weights_file):

    Cfg.compile_lwsvm = False
    Cfg.batch_size = 1
    Cfg.C.set_value(1e3)

    nnet = NeuralNet(dataset="imagenet", use_weights=weights_file)

    n_batches = int(50000. / Cfg.batch_size)
    make_fully_convolutional = compile_make_fully_convolutional(nnet)
    print("Weight transformation compiled.")
    make_fully_convolutional()
    print("Network has been made fully convolutional.")

    eval_fun = compile_eval_function(nnet)
    print("Evaluation function compiled")

    # full pass over the validation data:
    top1_acc = 0
    top5_acc = 0
    val_batches = 0
    count_images = 0
    for batch in tqdm(nnet.data.get_epoch_val(), total=n_batches):

        inputs, targets, _ = batch
        # Append horizontally flipped copies (simple test-time augmentation)
        inputs = np.concatenate((inputs, inputs[:, :, :, ::-1]))
        top1, top5 = eval_fun(inputs, targets)
        top1_acc += top1
        top5_acc += top5
        val_batches += 1
        count_images += len(targets)

    print("(Used %i samples in validation)" % count_images)
    top1_acc *= 100. / val_batches
    top5_acc *= 100. / val_batches

    print("Top-1 validation accuracy: %g%%" % top1_acc)
    print("Top-5 validation accuracy: %g%%" % top5_acc)
Example #23
def main():

    # Imports and converts training and test data to useable form
    training_data = rd.read_data('data/training.txt')
    rd.hot_encode(training_data)
    training_data = rd.to_object(training_data)

    test_data = rd.read_data('data/testing.txt')
    rd.hot_encode(test_data)
    test_data = rd.to_object(test_data)

    # Initialize neural network
    net = NeuralNet([64, 90, 10], 0.25, -0.3, 0.3)

    # Train the neural network for 5 epochs
    net.train_network(training_data, 5)

    # Display accuracy on the testing and training datasets
    print('\nFinal Testing Accuracy:')
    print(net.accuracy(test_data))

    print('\nFinal Training Accuracy:')
    print(net.accuracy(training_data))
Example #24
def main():
    mnist_path = os.path.join(os.getcwd(), "MNIST")
    (train_images, train_labels), (test_images,
                                   test_labels) = load_data(mnist_path)

    layers = [
        LinearLayer(32, 28**2, xavier),
        SigmoidLayer(),
        LinearLayer(32, 32, xavier),
        SigmoidLayer(),
        LinearLayer(10, 32, xavier),
        SigmoidLayer()
    ]
    net = NeuralNet(layers)

    np.seterr(over='ignore')
    train(net,
          train_images,
          train_labels,
          flatten_mnist_input,
          mnist_label_as_one_hot,
          epoch_count=1000,
          batch_size=1)

    confusion_matrix = DataFrame(np.zeros((10, 10)),
                                 index=range(10),
                                 columns=range(10))
    evaluator = test(net,
                     test_images,
                     test_labels,
                     confusion_matrix,
                     flatten_mnist_input,
                     highest_output_neuron,
                     mnist_label_as_one_hot,
                     title="POST-TRAIN")
    evaluator.plot()
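flatten_mnist_input, mnist_label_as_one_hot and highest_output_neuron are helpers defined elsewhere; minimal sketches of what they plausibly do, inferred only from how they are passed into train and test above:

import numpy as np


def flatten_mnist_input(image):
    # Hypothetical: flatten a 28x28 image into a (784,) float vector in [0, 1].
    return np.asarray(image, dtype=float).reshape(-1) / 255.0


def mnist_label_as_one_hot(label):
    # Hypothetical: encode a digit 0-9 as a 10-dimensional one-hot vector.
    one_hot = np.zeros(10)
    one_hot[int(label)] = 1.0
    return one_hot


def highest_output_neuron(output):
    # Hypothetical: the predicted class is the index of the largest output.
    return int(np.argmax(output))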
Example #25
from random import shuffle

from sklearn.datasets import load_iris

from neuralnet import NeuralNet, Layer, Node

data = load_iris()
target = data['target'].tolist()

actual = []

for i in target:
    actual.append([1 if i == j else 0 for j in range(3)])

# zip returns an iterator in Python 3; materialize it so it can be
# shuffled and sliced.
dataset = list(zip(data['data'].tolist(), actual))
shuffle(dataset)
train = dataset[:101]
test = dataset[101:]

nn = NeuralNet()
nn.set_layers([
    Layer('input', 4),
    Layer('hidden', 10, "LReLU"),
    Layer('output', 3, "LReLU")
])

score = 0.0
for i in test:
    p = nn.predict(i)
    if p.index(max(p)) == i[1].index(1):
        score += 1

# Expect a value around 0.333, since there is a
# 1 in 3 chance to randomly guess correctly
print("Before: {}\n".format(score / len(test)))
Example #26
training_one = [
    Instance([0, 0], [0, 0]),
    Instance([0, 1], [1, 1]),
    Instance([1, 0], [1, 1]),
    Instance([1, 1], [0, 0])
]

n_inputs = 2
n_outputs = 1
n_hiddens = 2
n_hidden_layers = 1

# specify activation functions per layer eg: [ hidden_layer_1, hidden_layer_2, output_layer ]
activation_functions = [tanh_function] * n_hidden_layers + [sigmoid_function]

# initialize the neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens, n_hidden_layers,
                    activation_functions)

# start training on test set one
network.backpropagation(training_one,
                        ERROR_LIMIT=1e-4,
                        learning_rate=0.3,
                        momentum_factor=0.9)

# save the trained network
network.save_to_file("trained_configuration.pkl")

# load a stored network configuration
# network = NeuralNet.load_from_file( "trained_configuration.pkl" )

# print out the result (loop body follows the pattern of Example #6)
for instance in training_one:
    print("Input: {} -> Output: {} \t| target: {}".format(
        instance.features,
        network.update(np.array([instance.features])),
        instance.targets))
Example #27
    # Insert blanks at alternate locations in the labelling (blank is nClasses)
    y1 = [nClasses]
    for char in y:
        y1 += [char, nClasses]

    data_y.append(np.asarray(y1, dtype=np.int32))
    data_x.append(np.asarray(x, dtype=th.config.floatX))

    if labels_len(y1) > (1 + len(x[0])) // conv_sz:
        bad_data = True
        show_all(y1, x, None, x[:, ::conv_sz], "Squissed")

################################
print("Building the Network")

ntwk = NeuralNet(nDims, nClasses, midlayer, midlayerargs, log_space)

print("Training the Network")
for epoch in range(nEpochs):
    print('Epoch : ', epoch)
    for samp in range(nSamples):
        x = data_x[samp]
        y = data_y[samp]
        # if not samp % 500: print(samp)

        if samp < nTrainSamples:
            if log_space and len(y) < 2:
                continue

            cst, pred, aux = ntwk.trainer(x, y)
            if (epoch % 10 == 0 and samp < 3) or np.isinf(cst):
Example #28
def dump_results(xp_dir, out_file):

    results = dict()

    if os.path.exists('{}/log_mnist.txt'.format(xp_dir)):
        dataset = 'mnist'

    elif os.path.exists('{}/log_cifar10.txt'.format(xp_dir)):
        dataset = 'cifar10'

    elif os.path.exists('{}/log_cifar100.txt'.format(xp_dir)):
        dataset = 'cifar100'

    else:
        raise NotImplementedError(
            'Could not find appropriate log file in {}'.format(xp_dir))

    results['dataset'] = dataset

    for base_solver in ["adagrad", "adadelta", "adam"]:

        lwsvm_solver = "{}_lwsvm".format(base_solver)
        full_solver = "{}_full".format(base_solver)

        # base solver: baseline
        results[base_solver] = dict()

        # full solver: baseline trained for longer to check
        # training was not stopped prematurely
        results[full_solver] = dict()

        # lwsvm solver: lwsvm applied after base solver
        results[lwsvm_solver] = dict()

        # unpickle results dumped from experiments
        with open("{}/{}_{}_svm_results.p".format(xp_dir, dataset,
                                                  base_solver), "rb") as f:
            base = pickle.load(f)
        with open("{}/{}_{}_svm_lwsvm_results.p".format(xp_dir, dataset,
                                                        base_solver),
                  "rb") as f:
            lwsvm = pickle.load(f)

        # performance of full solver
        results[full_solver]["train_objective"] = base['train_objective'][-1]
        results[full_solver]["train_accuracy"] = base['train_accuracy'][-1]
        results[full_solver]["test_accuracy"] = base['test_accuracy']
        results[full_solver]["n_epochs"] = len(base['time_stamp'])
        results[full_solver]["time"] = base['time_stamp'][-1]

        # performance of lwsvm solver
        results[lwsvm_solver]["train_objective"] = lwsvm['train_objective'][-1]
        results[lwsvm_solver]["train_accuracy"] = lwsvm['train_accuracy'][-1]
        results[lwsvm_solver]["test_accuracy"] = lwsvm['test_accuracy']
        results[lwsvm_solver]["time"] = lwsvm['time_stamp'][-1]

        # compute performance of base solver based on saved weights
        use_weights = "{}/{}_{}_svm_weights.p"\
            .format(xp_dir, dataset, base_solver)
        nnet = NeuralNet(dataset=dataset, use_weights=use_weights)
        Cfg.C.set_value(base['C'])
        Cfg.softmax_loss = False

        opt.sgd.updates.create_update(nnet)
        train_obj, train_acc = performance(nnet, which_set='train')
        _, test_acc = performance(nnet, which_set='test')

        # number of epochs of pre-training
        n_epochs = base['save_at']

        results[base_solver]["n_epochs"] = n_epochs
        results[base_solver]["time"] = base['time_stamp'][n_epochs]
        results[base_solver]["train_objective"] = train_obj
        results[base_solver]["train_accuracy"] = train_acc
        results[base_solver]["test_accuracy"] = test_acc

    with open(out_file, "wb") as f:
        pickle.dump(results, f)
Example #29
X_eval17_window_np = X_eval17_window.values
X_eval17_window_npb = np.insert(X_eval17_window_np,
                                X_eval17_window_np.shape[1],
                                1,
                                axis=1)  # append a bias column of ones

# (I.2) For all periods loop through the stations and make predictions

# train
for key in y_train:
    # Blank array for actual period, given station
    yhat = np.array([])
    # Loop through time for actual period, given station
    for t in range(0, len(y_train)):
        yhat = np.append(
            yhat, float(NeuralNet(X_train_npb[t, :], W_hat[key], False)))
    yhat_train[key] = yhat
    uhat_train[key] = y_train[key] - yhat
    yhat_train = pd.DataFrame(yhat_train, index=y_train.index)
    uhat_train = pd.DataFrame(uhat_train, index=y_train.index)
# test
for key in y_test:
    # Blank array for actual period, given station
    yhat = np.array([])
    # Loop through time for actual period, given station
    for t in range(0, len(y_test)):
        yhat = np.append(
            yhat, float(NeuralNet(X_test_npb[t, :], W_hat[key], False)))
    yhat_test[key] = yhat
    uhat_test[key] = y_test[key] - yhat
    yhat_test = pd.DataFrame(yhat_test, index=y_test.index)
Example #30
def main():

    args = parser.parse_args()
    print('Options:')
    for (key, value) in iteritems(vars(args)):
        print("{:16}: {}".format(key, value))

    assert os.path.exists(args.xp_dir)

    # default value for basefile: string basis for all exported file names
    if args.out_name:
        base_file = "{}/{}".format(args.xp_dir, args.out_name)
    else:
        base_file = "{}/{}_{}_{}".format(args.xp_dir, args.dataset,
                                         args.solver, args.loss)

    # if pickle file already there, consider run already done
    if (os.path.exists("{}_weights.p".format(base_file))
            and os.path.exists("{}_results.p".format(base_file))):
        sys.exit()

    # computation device
    if 'gpu' in args.device:
        theano.sandbox.cuda.use(args.device)

    # set save_at to n_epochs if not provided
    save_at = args.n_epochs if not args.save_at else args.save_at

    save_to = "{}_weights.p".format(base_file)
    weights = "../log/{}.p".format(args.in_name) if args.in_name else None

    # update config data

    # plot parameters
    Cfg.xp_path = args.xp_dir

    # dataset
    Cfg.seed = args.seed
    Cfg.out_frac = args.out_frac
    Cfg.ad_experiment = bool(args.ad_experiment)
    Cfg.weight_dict_init = bool(args.weight_dict_init)
    Cfg.pca = bool(args.pca)
    Cfg.unit_norm_used = args.unit_norm_used
    Cfg.gcn = bool(args.gcn)
    Cfg.zca_whitening = bool(args.zca_whitening)
    Cfg.mnist_val_frac = args.mnist_val_frac
    Cfg.mnist_bias = bool(args.mnist_bias)
    Cfg.mnist_rep_dim = args.mnist_rep_dim
    Cfg.mnist_architecture = args.mnist_architecture
    Cfg.mnist_normal = args.mnist_normal
    Cfg.mnist_outlier = args.mnist_outlier
    Cfg.cifar10_bias = bool(args.cifar10_bias)
    Cfg.cifar10_rep_dim = args.cifar10_rep_dim
    Cfg.cifar10_architecture = args.cifar10_architecture
    Cfg.cifar10_normal = args.cifar10_normal
    Cfg.cifar10_outlier = args.cifar10_outlier
    Cfg.gtsrb_rep_dim = args.gtsrb_rep_dim

    # neural network
    Cfg.softmax_loss = (args.loss == 'ce')
    Cfg.svdd_loss = (args.loss == 'svdd')
    Cfg.kde_loss = (args.loss == 'kde_loss')
    Cfg.deep_GMM_loss = (args.loss == 'deep_GMM')
    Cfg.reconstruction_loss = (args.loss == 'autoencoder')
    Cfg.use_batch_norm = bool(args.use_batch_norm)
    Cfg.learning_rate.set_value(args.lr)
    Cfg.lr_decay = bool(args.lr_decay)
    Cfg.lr_decay_after_epoch = args.lr_decay_after_epoch
    Cfg.lr_drop = bool(args.lr_drop)
    Cfg.lr_drop_in_epoch = args.lr_drop_in_epoch
    Cfg.lr_drop_factor = args.lr_drop_factor
    Cfg.momentum.set_value(args.momentum)
    if args.solver == "rmsprop":
        Cfg.rho.set_value(0.9)
    if args.solver == "adadelta":
        Cfg.rho.set_value(0.95)
    Cfg.block_coordinate = bool(args.block_coordinate)
    Cfg.k_update_epochs = args.k_update_epochs
    Cfg.center_fixed = bool(args.center_fixed)
    Cfg.R_update_solver = args.R_update_solver
    Cfg.R_update_scalar_method = args.R_update_scalar_method
    Cfg.R_update_lp_obj = args.R_update_lp_obj
    Cfg.warm_up_n_epochs = args.warm_up_n_epochs
    Cfg.batch_size = args.batch_size
    Cfg.leaky_relu = bool(args.leaky_relu)

    # Pre-training and autoencoder configuration
    Cfg.pretrain = bool(args.pretrain)
    Cfg.ae_loss = args.ae_loss
    Cfg.ae_lr_drop = bool(args.ae_lr_drop)
    Cfg.ae_lr_drop_in_epoch = args.ae_lr_drop_in_epoch
    Cfg.ae_lr_drop_factor = args.ae_lr_drop_factor
    Cfg.ae_weight_decay = bool(args.ae_weight_decay)
    Cfg.ae_C.set_value(args.ae_C)

    # SVDD parameters
    Cfg.nu.set_value(args.nu)
    Cfg.c_mean_init = bool(args.c_mean_init)
    if args.c_mean_init_n_batches == -1:
        Cfg.c_mean_init_n_batches = "all"
    else:
        Cfg.c_mean_init_n_batches = args.c_mean_init_n_batches
    Cfg.hard_margin = bool(args.hard_margin)

    # regularization
    Cfg.weight_decay = bool(args.weight_decay)
    Cfg.C.set_value(args.C)
    Cfg.reconstruction_penalty = bool(args.reconstruction_penalty)
    Cfg.C_rec.set_value(args.C_rec)
    Cfg.dropout = bool(args.dropout)
    Cfg.dropout_architecture = bool(args.dropout_arch)

    # diagnostics
    Cfg.nnet_diagnostics = bool(args.nnet_diagnostics)
    Cfg.e1_diagnostics = bool(args.e1_diagnostics)
    Cfg.ae_diagnostics = bool(args.ae_diagnostics)

    # train
    nnet = NeuralNet(dataset=args.dataset,
                     use_weights=weights,
                     pretrain=Cfg.pretrain)
    # pre-train weights via autoencoder, if specified
    if Cfg.pretrain:
        nnet.pretrain(solver="adam", lr=0.0001, n_epochs=1)

    nnet.train(solver=args.solver,
               n_epochs=args.n_epochs,
               save_at=save_at,
               save_to=save_to)

    # pickle/serialize AD results
    if Cfg.ad_experiment:
        nnet.log_results(filename=Cfg.xp_path + "/AD_results.p")

    # text log
    nnet.log.save_to_file("{}_results.p".format(base_file))  # save log
    log_exp_config(Cfg.xp_path, args.dataset)
    log_NeuralNet(Cfg.xp_path, args.loss, args.solver, args.lr, args.momentum,
                  None, args.n_epochs, args.C, args.C_rec, args.nu)
    if Cfg.ad_experiment:
        log_AD_results(Cfg.xp_path, nnet)

    # plot diagnostics
    if Cfg.nnet_diagnostics:
        # common suffix for plot titles
        str_lr = "lr = " + str(args.lr)
        C = int(args.C)
        if not Cfg.weight_decay:
            C = None
        str_C = "C = " + str(C)
        Cfg.title_suffix = "(" + args.solver + ", " + str_C + ", " + str_lr + ")"

        if args.loss == 'autoencoder':
            plot_ae_diagnostics(nnet, Cfg.xp_path, Cfg.title_suffix)
        else:
            plot_diagnostics(nnet, Cfg.xp_path, Cfg.title_suffix)

    plot_filters(nnet, Cfg.xp_path, Cfg.title_suffix)

    # If AD experiment, plot most anomalous and most normal
    if Cfg.ad_experiment:
        n_img = 32
        plot_outliers_and_most_normal(nnet, n_img, Cfg.xp_path)